Stream Duties Client Implementation (#5867)

* include validator client stream

* Update validator/client/validator_attest.go

* gazelle

* rem extraneous logs

* fixing tests

* resolve most tests

* gaz

* add lock

* ivan feedback

* pass tests for update protect

* gaz

* duties gaz

* no need for canonical head slot

* fix ctx leak

* fmt

* add in feature flag

* add streaming subpackage

* add polling/streaming separation

* able to build

* fix duplicate package names

* fix polling

* imports

* confirm it works

* fixed up comment

* go lint comments

* gaz

* build

* Update validator/client/streaming/service_test.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* tidy

* fmt

* add stream duties to e2e

* add stream duties to e2e flags

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Author: Raul Jordan
Date: 2020-06-18 13:30:05 -05:00
Committed by: GitHub
Parent: 10af753f59
Commit: 7067c84c69
45 changed files with 4630 additions and 206 deletions


@@ -21,7 +21,8 @@ go_library(
"//slasher:__subpackages__",
"//tools/blocktree:__pkg__",
"//tools/pcli:__pkg__",
"//validator/client:__pkg__",
"//validator/client/streaming:__pkg__",
"//validator/client/polling:__pkg__",
],
deps = [
"//proto/beacon/p2p/v1:go_default_library",

go.sum

@@ -152,7 +152,6 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0=
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc=
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
@@ -268,7 +267,6 @@ github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c/go.mod h1:sam69Hju0uq+
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -392,7 +390,6 @@ github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU=
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs=
github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
@@ -426,7 +423,6 @@ github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY=
github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU=
github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
github.com/ipfs/go-log/v2 v2.1.1 h1:G4TtqN+V9y9HY9TA6BwbCVyyBZ2B9MbCjR2MtGx8FR0=
github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
@@ -636,7 +632,6 @@ github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB
github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU=
github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM=
github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M=
github.com/libp2p/go-libp2p-tls v0.1.4-0.20200421131144-8a8ad624a291 h1:Ge/2CYttU7XdkPPqQ7e3TiuMFneLie1rM/UjRxPPGsI=
github.com/libp2p/go-libp2p-tls v0.1.4-0.20200421131144-8a8ad624a291/go.mod h1:j1RjQWh/Ek3CRkHIn9sbVzW++n+yK2AnWtM4kZqlTFY=
@@ -648,7 +643,6 @@ github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8ME
github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI=
github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw=
github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA=
github.com/libp2p/go-libp2p-yamux v0.2.7 h1:vzKu0NVtxvEIDGCv6mjKRcK0gipSgaXmJZ6jFv0d/dk=
github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4=
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
@@ -680,11 +674,9 @@ github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQza
github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M=
github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM=
github.com/libp2p/go-sockaddr v0.0.2 h1:tCuXfpA9rq7llM/v834RKc/Xvovy/AqM9kHvTV/jY/Q=
github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0=
github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-stream-muxer v0.0.1 h1:Ce6e2Pyu+b5MC1k3eeFtAax0pW4gc6MosYSLV05UeLw=
github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc=
github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY=
@@ -695,7 +687,6 @@ github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWci
github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo=
github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM=
github.com/libp2p/go-ws-transport v0.3.0 h1:mjo6pL5aVR9rCjl9wNq3DupbaQlyR61pzoOT2MdtxaA=
github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw=
github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
@@ -703,9 +694,7 @@ github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ
github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.5 h1:ibuz4naPAully0pN6J/kmUARiqLpnDQIzI/8GCOrljg=
github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.6 h1:O5qcBXRcfqecvQ/My9NqDNHB3/5t58yuJYqthcKhhgE=
github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI=
github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
@@ -723,7 +712,6 @@ github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZb
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -732,7 +720,6 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HN
github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
@@ -808,7 +795,6 @@ github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysj
github.com/multiformats/go-multiaddr-net v0.1.5 h1:QoRKvu0xHN1FCFJcMQLbG/yQE2z441L5urvG3+qyz7g=
github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA=
github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
@@ -896,7 +882,6 @@ github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A=
github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
@@ -931,7 +916,6 @@ github.com/prysmaticlabs/ethereumapis v0.0.0-20200617012222-f52a0eff2886/go.mod
github.com/prysmaticlabs/go-bitfield v0.0.0-20191017011753-53b773adde52/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-bitfield v0.0.0-20200322041314-62c2aee71669 h1:cX6YRZnZ9sgMqM5U14llxUiXVNJ3u07Res1IIjTOgtI=
github.com/prysmaticlabs/go-bitfield v0.0.0-20200322041314-62c2aee71669/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-ssz v0.0.0-20200101200214-e24db4d9e963 h1:Th5ufPIaL5s/7i3gXHTgiTwfsUhWDP/PwFRiI6qV6v0=
github.com/prysmaticlabs/go-ssz v0.0.0-20200101200214-e24db4d9e963/go.mod h1:VecIJZrewdAuhVckySLFt2wAAHRME934bSDurP8ftkc=
github.com/prysmaticlabs/go-ssz v0.0.0-20200605034351-b6a925e519d0 h1:V4o7uJqGXAuz6ZpwxhT4cnVjRb/XxpBmTKp/lVVr05k=
github.com/prysmaticlabs/go-ssz v0.0.0-20200605034351-b6a925e519d0/go.mod h1:VecIJZrewdAuhVckySLFt2wAAHRME934bSDurP8ftkc=
@@ -1028,7 +1012,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
@@ -1043,7 +1026,6 @@ github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2
github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk=
github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -1095,7 +1077,6 @@ github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
github.com/whyrusleeping/go-logging v0.0.1 h1:fwpzlmT0kRC/Fmd0MdmGgJG/CXIZ6gFq46FQZjprUcc=
github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE=
github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8=
github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA=
@@ -1240,7 +1221,6 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476 h1:E7ct1C6/33eOdrGZKMoyntcEvs2dwZnDe30crG5vpYU=
golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b h1:IYiJPiJfzktmDAO1HQiwjMjwjlYKHAL7KzeD544RJPs=
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -1377,7 +1357,6 @@ google.golang.org/api v0.0.0-20170921000349-586095a6e407/go.mod h1:4mhQ8q/RsB7i+
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -1417,7 +1396,6 @@ google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200218151345-dad8c97a84f5/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200528191852-705c0b31589b h1:nl5tymnV+50ACFZUDAP+xFCe3Zh3SWdMDx+ernZSKNA=
@@ -1445,7 +1423,6 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.20.1/go.mod h1:KqelGeouBkcbcuB3HCk4/YH2tmNLk6YSWA5LIWeI/lY=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
@@ -1484,7 +1461,6 @@ gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHO
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
@@ -1526,7 +1502,6 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=


@@ -13,7 +13,8 @@
},
"lostcancel": {
"exclude_files": {
"validator/client/runner.go": "No need to cancel right when goroutines begin",
"validator/client/streaming/runner.go": "No need to cancel right when goroutines begin",
"validator/client/polling/runner.go": "No need to cancel right when goroutines begin",
"external/.*": "Third party code"
}
},

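For context, the pattern these exclusions permit is the per-slot deadline context whose cancel function is deliberately discarded (see runner.go later in this commit). A minimal sketch, assuming only the standard library, of the shape that would otherwise trip the lostcancel check:

package example

import (
	"context"
	"time"
)

// slotContext derives a context that expires at the slot deadline. The cancel
// function is intentionally dropped: the context is bounded by its deadline
// and by the parent ctx, so no explicit cancel is needed when the slot's
// goroutines begin; this is exactly the pattern the exclusions above allow.
func slotContext(ctx context.Context, deadline time.Time) context.Context {
	slotCtx, _ := context.WithDeadline(ctx, deadline)
	return slotCtx
}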

@@ -35,6 +35,7 @@ type Flags struct {
E2EConfig bool // E2EConfig made specifically for testing, do not use except in E2E.
// Feature related flags.
EnableStreamDuties bool // Enable streaming of validator duties instead of a polling-based approach.
WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
InitSyncNoVerify bool // InitSyncNoVerify when initial syncing w/o verifying block's contents.
DisableDynamicCommitteeSubnets bool // Disables dynamic attestation committee subnets via p2p.
@@ -272,6 +273,10 @@ func ConfigureValidator(ctx *cli.Context) {
complainOnDeprecatedFlags(ctx)
cfg := &Flags{}
cfg = configureConfig(ctx, cfg)
if ctx.Bool(enableStreamDuties.Name) {
log.Warn("Enabled validator duties streaming.")
cfg.EnableStreamDuties = true
}
if ctx.Bool(enableProtectProposerFlag.Name) {
log.Warn("Enabled validator proposal slashing protection.")
cfg.ProtectProposer = true

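Once ConfigureValidator has run, downstream code reads this flag through featureconfig.Get(). A minimal sketch of how a caller might select the duties implementation; the runStreaming/runPolling entrypoints are hypothetical stand-ins for the new streaming and polling packages:

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/shared/featureconfig"
)

// selectDuties branches on the EnableStreamDuties feature flag, which is set
// by the --enable-stream-duties CLI flag, to pick between the streaming and
// polling validator client implementations introduced in this commit.
// runStreaming and runPolling are hypothetical callbacks for illustration.
func selectDuties(ctx context.Context, runStreaming, runPolling func(context.Context)) {
	if featureconfig.Get().EnableStreamDuties {
		runStreaming(ctx)
	} else {
		runPolling(ctx)
	}
}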

@@ -157,6 +157,10 @@ var (
Name: "disable-reduce-attester-state-copy",
Usage: "Disables the feature to reduce the amount of state copies for attester rpc",
}
enableStreamDuties = &cli.BoolFlag{
Name: "enable-stream-duties",
Usage: "Enables validator duties streaming in the validator client",
}
enableKadDht = &cli.BoolFlag{
Name: "enable-kad-dht",
Usage: "Enables libp2p's kademlia based discovery to start running",
@@ -486,6 +490,7 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
e2eConfigFlag,
enableProtectAttesterFlag,
enableProtectProposerFlag,
enableStreamDuties,
enableExternalSlasherProtectionFlag,
disableDomainDataCacheFlag,
waitForSyncedFlag,
@@ -503,6 +508,7 @@ var E2EValidatorFlags = []string{
"--wait-for-synced",
"--enable-protect-attester",
"--enable-protect-proposer",
"--enable-stream-duties",
}
// BeaconChainFlags contains a list of all the feature flags that apply to the beacon-chain client.


@@ -22,7 +22,7 @@ go_library(
"//shared/params:go_default_library",
"//shared/version:go_default_library",
"//validator/accounts:go_default_library",
"//validator/client:go_default_library",
"//validator/client/streaming:go_default_library",
"//validator/flags:go_default_library",
"//validator/node:go_default_library",
"@com_github_joonix_log//:go_default_library",
@@ -63,7 +63,8 @@ go_image(
"//shared/params:go_default_library",
"//shared/version:go_default_library",
"//validator/accounts:go_default_library",
"//validator/client:go_default_library",
"//validator/client/polling:go_default_library",
"//validator/client/streaming:go_default_library",
"//validator/flags:go_default_library",
"//validator/node:go_default_library",
"@com_github_joonix_log//:go_default_library",


@@ -0,0 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "github.com/prysmaticlabs/prysm/validator/client/metrics",
visibility = ["//validator/client:__subpackages__"],
deps = [
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
],
)


@@ -0,0 +1,121 @@
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
// ValidatorStatusesGaugeVec used to track validator statuses by public key.
ValidatorStatusesGaugeVec = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "validator",
Name: "statuses",
Help: "validator statuses: 0 UNKNOWN, 1 DEPOSITED, 2 PENDING, 3 ACTIVE, 4 EXITING, 5 SLASHING, 6 EXITED",
},
[]string{
// Validator pubkey.
"pubkey",
},
)
// ValidatorAggSuccessVec used to count successful aggregations.
ValidatorAggSuccessVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "successful_aggregations",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorAggFailVec used to count failed aggregations.
ValidatorAggFailVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_aggregations",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorProposeSuccessVec used to count successful proposals.
ValidatorProposeSuccessVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "successful_proposals",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorProposeFailVec used to count failed proposals.
ValidatorProposeFailVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_proposals",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorProposeFailVecSlasher used to count failed proposals by slashing protection.
ValidatorProposeFailVecSlasher = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "validator_proposals_rejected_total",
Help: "Count the block proposals rejected by slashing protection.",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorBalancesGaugeVec used to keep track of validator balances by public key.
ValidatorBalancesGaugeVec = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "validator",
Name: "balance",
Help: "current validator balance.",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorAttestSuccessVec used to count successful attestations.
ValidatorAttestSuccessVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "successful_attestations",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorAttestFailVec used to count failed attestations.
ValidatorAttestFailVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_attestations",
},
[]string{
// validator pubkey
"pubkey",
},
)
// ValidatorAttestFailVecSlasher used to count failed attestations by slashing protection.
ValidatorAttestFailVecSlasher = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "validator_attestations_rejected_total",
Help: "Count the attestations rejected by slashing protection.",
},
[]string{
// validator pubkey
"pubkey",
},
)
)

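As a usage sketch, call sites label each metric with the validator's public key formatted as 0x-prefixed hex, mirroring the fmtKey convention used throughout the call-site updates later in this commit:

package example

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/validator/client/metrics"
)

// recordAttestationResult increments the success or failure counter for the
// given validator, using the same pubkey label format as the client packages.
func recordAttestationResult(pubKey [48]byte, succeeded bool) {
	fmtKey := fmt.Sprintf("%#x", pubKey)
	if succeeded {
		metrics.ValidatorAttestSuccessVec.WithLabelValues(fmtKey).Inc()
	} else {
		metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
	}
}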

@@ -13,7 +13,7 @@ go_library(
"validator_metrics.go",
"validator_propose.go",
],
importpath = "github.com/prysmaticlabs/prysm/validator/client",
importpath = "github.com/prysmaticlabs/prysm/validator/client/polling",
visibility = ["//validator:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
@@ -28,6 +28,7 @@ go_library(
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/slotutil:go_default_library",
"//validator/client/metrics:go_default_library",
"//validator/db:go_default_library",
"//validator/keymanager:go_default_library",
"//validator/slashing-protection:go_default_library",
@@ -40,8 +41,6 @@ go_library(
"@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,6 +1,4 @@
// Package client defines the entire lifecycle of a validator in eth2; it is responsible
// for interacting with a beacon node to determine and perform validator duties.
package client
package polling
import (
"context"


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,5 +1,6 @@
// Package client represents the functionality to act as a validator.
package client
// Package polling represents a gRPC polling-based implementation
// of an eth2 validator client.
package polling
import (
"context"
@@ -17,8 +18,6 @@ import (
ptypes "github.com/gogo/protobuf/types"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
slashpb "github.com/prysmaticlabs/prysm/proto/slashing"
@@ -27,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/prysmaticlabs/prysm/validator/db"
"github.com/prysmaticlabs/prysm/validator/keymanager"
slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection"
@@ -67,18 +67,6 @@ type validator struct {
protector slashingprotection.Protector
}
var validatorStatusesGaugeVec = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "validator",
Name: "statuses",
Help: "validator statuses: 0 UNKNOWN, 1 DEPOSITED, 2 PENDING, 3 ACTIVE, 4 EXITING, 5 SLASHING, 6 EXITED",
},
[]string{
// Validator pubkey.
"pubkey",
},
)
// Done cleans up the validator.
func (v *validator) Done() {
v.ticker.Done()
@@ -249,7 +237,7 @@ func (v *validator) checkAndLogValidatorStatus(validatorStatuses []*ethpb.Valida
log := log.WithFields(fields)
if v.emitAccountMetrics {
fmtKey := fmt.Sprintf("%#x", status.PublicKey)
validatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status))
metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status))
}
switch status.Status.Status {
case ethpb.ValidatorStatus_UNKNOWN_STATUS:
@@ -565,7 +553,7 @@ func (v *validator) logDuties(slot uint64, duties []*ethpb.DutiesResponse_Duty)
for _, duty := range duties {
if v.emitAccountMetrics {
fmtKey := fmt.Sprintf("%#x", duty.PublicKey)
validatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status))
metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status))
}
// Only interested in validators who are attesting/proposing.


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"
@@ -6,39 +6,15 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"go.opencensus.io/trace"
)
var (
validatorAggSuccessVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "successful_aggregations",
},
[]string{
// validator pubkey
"pubkey",
},
)
validatorAggFailVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_aggregations",
},
[]string{
// validator pubkey
"pubkey",
},
)
)
// SubmitAggregateAndProof submits the validator's signed slot signature to the beacon node
// via gRPC. Beacon node will verify the slot signature and determine if the validator is also
// an aggregator. If yes, then beacon node will broadcast aggregated signature and
@@ -54,7 +30,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu
if err != nil {
log.Errorf("Could not fetch validator assignment: %v", err)
if v.emitAccountMetrics {
validatorAggFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -72,7 +48,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu
if err != nil {
log.Errorf("Could not sign slot: %v", err)
if v.emitAccountMetrics {
validatorAggFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -91,7 +67,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu
if err != nil {
log.WithField("slot", slot).Errorf("Could not submit slot signature to beacon node: %v", err)
if v.emitAccountMetrics {
validatorAggFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -109,7 +85,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu
if err != nil {
log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err)
if v.emitAccountMetrics {
validatorAggFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -117,12 +93,12 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu
if err := v.addIndicesToLog(duty); err != nil {
log.Errorf("Could not add aggregator indices to logs: %v", err)
if v.emitAccountMetrics {
validatorAggFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if v.emitAccountMetrics {
validatorAggSuccessVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAggSuccessVec.WithLabelValues(fmtKey).Inc()
}
}


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,4 +1,4 @@
package client
package polling
import (
"bytes"
@@ -7,8 +7,6 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -20,44 +18,12 @@ import (
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
validatorAttestSuccessVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "successful_attestations",
},
[]string{
// validator pubkey
"pubkey",
},
)
validatorAttestFailVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_attestations",
},
[]string{
// validator pubkey
"pubkey",
},
)
validatorAttestFailVecSlasher = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "validator_attestations_rejected_total",
Help: "Count the attestations rejected by slashing protection.",
},
[]string{
// validator pubkey
"pubkey",
},
)
)
// SubmitAttestation completes the validator client's attester responsibility at a given slot.
// It fetches the latest beacon block head along with the latest canonical beacon state
// information in order to sign the block and include information about the validator's
@@ -73,7 +39,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
if err != nil {
log.WithError(err).Error("Could not fetch validator assignment")
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -95,7 +61,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
if err != nil {
log.WithError(err).Error("Could not request attestation to sign at slot")
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -107,7 +73,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
"targetEpoch": data.Target.Epoch,
}).Error("Attempted to make a slashable attestation, rejected")
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -117,7 +83,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
if err != nil {
log.WithError(err).Error("Could not sign attestation")
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -134,7 +100,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
if !found {
log.Errorf("Validator ID %d not found in committee of %v", duty.ValidatorIndex, duty.Committee)
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -159,7 +125,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
"targetEpoch": data.Target.Epoch,
}).Error("Attempted to make a slashable attestation, rejected by external slasher service")
if v.emitAccountMetrics {
validatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -168,7 +134,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
if err != nil {
log.WithError(err).Error("Could not submit attestation to beacon node")
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -176,7 +142,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
if err := v.saveAttesterIndexToData(data, duty.ValidatorIndex); err != nil {
log.WithError(err).Error("Could not save validator index for logging")
if v.emitAccountMetrics {
validatorAttestFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -189,7 +155,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [
}
if v.emitAccountMetrics {
validatorAttestSuccessVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorAttestSuccessVec.WithLabelValues(fmtKey).Inc()
}
span.AddAttributes(


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,4 +1,4 @@
package client
package polling
import (
"fmt"


@@ -1,29 +1,16 @@
package client
package polling
import (
"context"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/sirupsen/logrus"
)
var validatorBalancesGaugeVec = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "validator",
Name: "balance",
Help: "current validator balance.",
},
[]string{
// validator pubkey
"pubkey",
},
)
// LogValidatorGainsAndLosses logs important metrics related to this validator client's
// responsibilities throughout the beacon chain's lifecycle. It logs absolute accrued rewards
// and penalties over time, percentage gain/loss, and gives the end user a better idea
@@ -54,7 +41,7 @@ func (v *validator) LogValidatorGainsAndLosses(ctx context.Context, slot uint64)
if v.emitAccountMetrics {
for _, missingPubKey := range resp.MissingValidators {
fmtKey := fmt.Sprintf("%#x", missingPubKey[:])
validatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(0)
metrics.ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(0)
}
}
@@ -92,7 +79,7 @@ func (v *validator) LogValidatorGainsAndLosses(ctx context.Context, slot uint64)
"percentChange": fmt.Sprintf("%.5f%%", percentNet*100),
}).Info("Previous epoch voting summary")
if v.emitAccountMetrics {
validatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(newBalance)
metrics.ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(newBalance)
}
}


@@ -1,4 +1,4 @@
package client
package polling
// Validator client proposer functions.
import (
@@ -6,8 +6,6 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -17,44 +15,12 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
validatorProposeSuccessVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "successful_proposals",
},
[]string{
// validator pubkey
"pubkey",
},
)
validatorProposeFailVec = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "validator",
Name: "failed_proposals",
},
[]string{
// validator pubkey
"pubkey",
},
)
validatorProposeFailVecSlasher = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "validator_proposals_rejected_total",
Help: "Count the block proposals rejected by slashing protection.",
},
[]string{
// validator pubkey
"pubkey",
},
)
)
// ProposeBlock proposes a new beacon block for a given slot. This method collects the
// previous beacon block, any pending deposits, and ETH1 data from the beacon
// chain node to construct the new block. The new block is then processed with
@@ -78,7 +44,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if err != nil {
log.WithError(err).Error("Failed to sign randao reveal")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -92,7 +58,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if err != nil {
log.WithField("blockSlot", slot).WithError(err).Error("Failed to request block from beacon node")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -103,7 +69,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if err != nil {
log.WithError(err).Error("Failed to get proposal history")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -112,7 +78,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if slotBits.BitAt(slot % params.BeaconConfig().SlotsPerEpoch) {
log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -123,7 +89,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if err != nil {
log.WithError(err).Error("Failed to sign block")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -140,7 +106,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if !v.protector.VerifyBlock(ctx, bh) {
log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected by external slasher")
if v.emitAccountMetrics {
validatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -151,7 +117,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if err != nil {
log.WithError(err).Error("Failed to propose block")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
@@ -161,14 +127,14 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by
if err := v.db.SaveProposalHistoryForEpoch(ctx, pubKey[:], epoch, slotBits); err != nil {
log.WithError(err).Error("Failed to save updated proposal history")
if v.emitAccountMetrics {
validatorProposeFailVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
}
if v.emitAccountMetrics {
validatorProposeSuccessVec.WithLabelValues(fmtKey).Inc()
metrics.ValidatorProposeSuccessVec.WithLabelValues(fmtKey).Inc()
}
span.AddAttributes(


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -1,4 +1,4 @@
package client
package polling
import (
"context"


@@ -0,0 +1,98 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_library(
name = "go_default_library",
srcs = [
"runner.go",
"service.go",
"validator.go",
"validator_aggregate.go",
"validator_attest.go",
"validator_duties.go",
"validator_log.go",
"validator_metrics.go",
"validator_propose.go",
],
importpath = "github.com/prysmaticlabs/prysm/validator/client/streaming",
visibility = ["//validator:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//proto/slashing:go_default_library",
"//shared/blockutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/grpcutils:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/slotutil:go_default_library",
"//validator/client/metrics:go_default_library",
"//validator/db:go_default_library",
"//validator/keymanager:go_default_library",
"//validator/slashing-protection:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_middleware//:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_middleware//retry:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//plugin/ocgrpc:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"fake_validator_test.go",
"runner_test.go",
"service_test.go",
"validator_aggregate_test.go",
"validator_attest_test.go",
"validator_duties_test.go",
"validator_propose_test.go",
"validator_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//proto/slashing:go_default_library",
"//shared:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/keystore:go_default_library",
"//shared/mock:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/testutil:go_default_library",
"//validator/accounts:go_default_library",
"//validator/db:go_default_library",
"//validator/keymanager:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
],
)


@@ -0,0 +1,112 @@
package streaming
import (
"context"
"time"
)
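// Compile-time assertion that fakeValidator satisfies the Validator interface.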
var _ = Validator(&fakeValidator{})
type fakeValidator struct {
DoneCalled bool
WaitForActivationCalled bool
WaitForChainStartCalled bool
WaitForSyncCalled bool
WaitForSyncedCalled bool
NextSlotCalled bool
StreamDutiesCalled bool
UpdateProtectionsCalled bool
RoleAtCalled bool
AttestToBlockHeadCalled bool
ProposeBlockCalled bool
LogValidatorGainsAndLossesCalled bool
SaveProtectionsCalled bool
SlotDeadlineCalled bool
ProposeBlockArg1 uint64
AttestToBlockHeadArg1 uint64
RoleAtArg1 uint64
NextSlotRet <-chan uint64
PublicKey string
StreamDutiesRet error
RolesAtRet []validatorRole
}
func (fv *fakeValidator) Done() {
fv.DoneCalled = true
}
func (fv *fakeValidator) WaitForChainStart(_ context.Context) error {
fv.WaitForChainStartCalled = true
return nil
}
func (fv *fakeValidator) WaitForActivation(_ context.Context) error {
fv.WaitForActivationCalled = true
return nil
}
func (fv *fakeValidator) WaitForSync(_ context.Context) error {
fv.WaitForSyncCalled = true
return nil
}
func (fv *fakeValidator) WaitForSynced(_ context.Context) error {
fv.WaitForSyncedCalled = true
return nil
}
func (fv *fakeValidator) SlotDeadline(_ uint64) time.Time {
fv.SlotDeadlineCalled = true
return time.Now()
}
func (fv *fakeValidator) NextSlot() <-chan uint64 {
fv.NextSlotCalled = true
return fv.NextSlotRet
}
func (fv *fakeValidator) StreamDuties(_ context.Context) error {
fv.StreamDutiesCalled = true
return fv.StreamDutiesRet
}
func (fv *fakeValidator) UpdateProtections(_ context.Context, slot uint64) error {
fv.UpdateProtectionsCalled = true
return nil
}
func (fv *fakeValidator) LogValidatorGainsAndLosses(_ context.Context, slot uint64) error {
fv.LogValidatorGainsAndLossesCalled = true
return nil
}
func (fv *fakeValidator) SaveProtections(_ context.Context) error {
fv.SaveProtectionsCalled = true
return nil
}
func (fv *fakeValidator) RolesAt(_ context.Context, slot uint64) (map[[48]byte][]validatorRole, error) {
fv.RoleAtCalled = true
fv.RoleAtArg1 = slot
vr := make(map[[48]byte][]validatorRole)
vr[[48]byte{1}] = fv.RolesAtRet
return vr, nil
}
func (fv *fakeValidator) SubmitAttestation(_ context.Context, slot uint64, pubKey [48]byte) {
fv.AttestToBlockHeadCalled = true
fv.AttestToBlockHeadArg1 = slot
}
func (fv *fakeValidator) ProposeBlock(_ context.Context, slot uint64, pubKey [48]byte) {
fv.ProposeBlockCalled = true
fv.ProposeBlockArg1 = slot
}
func (fv *fakeValidator) SubmitAggregateAndProof(_ context.Context, slot uint64, pubKey [48]byte) {}
func (fv *fakeValidator) LogAttestationsSubmitted() {}
func (fv *fakeValidator) UpdateDomainDataCaches(context.Context, uint64) {}
func (fv *fakeValidator) CurrentSlot() uint64 { return 0 }


@@ -0,0 +1,153 @@
package streaming
import (
"context"
"fmt"
"sync"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Validator interface defines the primary methods of a validator client.
type Validator interface {
Done()
WaitForChainStart(ctx context.Context) error
WaitForSync(ctx context.Context) error
WaitForSynced(ctx context.Context) error
WaitForActivation(ctx context.Context) error
NextSlot() <-chan uint64
CurrentSlot() uint64
SlotDeadline(slot uint64) time.Time
LogValidatorGainsAndLosses(ctx context.Context, slot uint64) error
StreamDuties(ctx context.Context) error
UpdateProtections(ctx context.Context, slot uint64) error
RolesAt(ctx context.Context, slot uint64) (map[[48]byte][]validatorRole, error) // validator pubKey -> roles
SubmitAttestation(ctx context.Context, slot uint64, pubKey [48]byte)
ProposeBlock(ctx context.Context, slot uint64, pubKey [48]byte)
SubmitAggregateAndProof(ctx context.Context, slot uint64, pubKey [48]byte)
LogAttestationsSubmitted()
SaveProtections(ctx context.Context) error
UpdateDomainDataCaches(ctx context.Context, slot uint64)
}
// Run the main validator routine. This routine exits if the context is
// canceled.
//
// Order of operations:
// 1 - Initialize validator data
// 2 - Wait for validator activation
// 3 - Listen to a server-side stream of validator duties
// 4 - Wait for the next slot start
// 5 - Determine role at current slot
// 6 - Perform assigned role, if any
func run(ctx context.Context, v Validator) {
defer v.Done()
if featureconfig.Get().WaitForSynced {
if err := v.WaitForSynced(ctx); err != nil {
log.Fatalf("Could not determine if chain started and beacon node is synced: %v", err)
}
} else {
if err := v.WaitForChainStart(ctx); err != nil {
log.Fatalf("Could not determine if beacon chain started: %v", err)
}
if err := v.WaitForSync(ctx); err != nil {
log.Fatalf("Could not determine if beacon node synced: %v", err)
}
}
if err := v.WaitForActivation(ctx); err != nil {
log.Fatalf("Could not wait for validator activation: %v", err)
}
// We listen to a server-side stream of validator duties in the
// background of the validator client.
go func() {
if err := v.StreamDuties(ctx); err != nil {
handleAssignmentError(err, v.CurrentSlot())
}
}()
for {
ctx, span := trace.StartSpan(ctx, "validator.processSlot")
select {
case <-ctx.Done():
log.Info("Context canceled, stopping validator")
return // Exit if context is canceled.
case slot := <-v.NextSlot():
span.AddAttributes(trace.Int64Attribute("slot", int64(slot)))
deadline := v.SlotDeadline(slot)
slotCtx, _ := context.WithDeadline(ctx, deadline)
// Report this validator client's rewards and penalties throughout its lifecycle.
log := log.WithField("slot", slot)
log.WithField("deadline", deadline).Debug("Set deadline for proposals and attestations")
if err := v.LogValidatorGainsAndLosses(slotCtx, slot); err != nil {
log.WithError(err).Error("Could not report validator's rewards/penalties")
}
if featureconfig.Get().ProtectAttester {
if err := v.UpdateProtections(ctx, slot); err != nil {
log.WithError(err).Error("Could not update validator protection")
}
}
// Start fetching domain data for the next epoch.
if helpers.IsEpochEnd(slot) {
go v.UpdateDomainDataCaches(ctx, slot+1)
}
var wg sync.WaitGroup
allRoles, err := v.RolesAt(ctx, slot)
if err != nil {
log.WithError(err).Error("Could not get validator roles")
continue
}
for id, roles := range allRoles {
wg.Add(len(roles))
for _, role := range roles {
go func(role validatorRole, id [48]byte) {
defer wg.Done()
switch role {
case roleAttester:
v.SubmitAttestation(slotCtx, slot, id)
case roleProposer:
v.ProposeBlock(slotCtx, slot, id)
case roleAggregator:
v.SubmitAggregateAndProof(slotCtx, slot, id)
case roleUnknown:
log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(id[:]))).Trace("No active roles, doing nothing")
default:
log.Warnf("Unhandled role %v", role)
}
}(role, id)
}
}
// Wait for all processes to complete, then report span complete.
go func() {
wg.Wait()
v.LogAttestationsSubmitted()
if featureconfig.Get().ProtectAttester {
if err := v.SaveProtections(ctx); err != nil {
log.WithError(err).Error("Could not save validator protection")
}
}
span.End()
}()
}
}
}
func handleAssignmentError(err error, slot uint64) {
if errCode, ok := status.FromError(err); ok && errCode.Code() == codes.NotFound {
log.WithField(
"epoch", slot/params.BeaconConfig().SlotsPerEpoch,
).Warn("Validator not yet assigned to epoch")
} else {
log.WithField("error", err).Error("Failed to update assignments")
}
}

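Under the hood, StreamDuties consumes a server-side gRPC stream from the beacon node rather than polling once per epoch. A minimal sketch of that receive loop, assuming the BeaconNodeValidatorClient.StreamDuties API from ethereumapis v1alpha1; the handle callback is illustrative:

package example

import (
	"context"
	"io"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)

// streamDuties opens the server-side duties stream and hands each update to
// handle until the stream ends or the context is canceled. This is the
// standard gRPC receive loop the streaming package builds on.
func streamDuties(ctx context.Context, client ethpb.BeaconNodeValidatorClient, req *ethpb.DutiesRequest, handle func(*ethpb.DutiesResponse)) error {
	stream, err := client.StreamDuties(ctx, req)
	if err != nil {
		return err
	}
	for {
		res, err := stream.Recv()
		if err == io.EOF {
			return nil // beacon node closed the stream
		}
		if err != nil {
			return err // includes context cancellation surfaced as a gRPC status
		}
		handle(res)
	}
}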

@@ -0,0 +1,153 @@
package streaming
import (
"context"
"testing"
"time"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
func cancelledContext() context.Context {
ctx, cancel := context.WithCancel(context.Background())
cancel()
return ctx
}
func TestCancelledContext_CleansUpValidator(t *testing.T) {
v := &fakeValidator{}
run(cancelledContext(), v)
if !v.DoneCalled {
t.Error("Expected Done() to be called")
}
}
func TestCancelledContext_WaitsForChainStart(t *testing.T) {
v := &fakeValidator{}
run(cancelledContext(), v)
if !v.WaitForChainStartCalled {
t.Error("Expected WaitForChainStart() to be called")
}
}
func TestCancelledContext_WaitsForSynced(t *testing.T) {
cfg := &featureconfig.Flags{
WaitForSynced: true,
}
reset := featureconfig.InitWithReset(cfg)
defer reset()
v := &fakeValidator{}
run(cancelledContext(), v)
if !v.WaitForSyncedCalled {
t.Error("Expected WaitForSynced() to be called")
}
}
func TestCancelledContext_WaitsForActivation(t *testing.T) {
v := &fakeValidator{}
run(cancelledContext(), v)
if !v.WaitForActivationCalled {
t.Error("Expected WaitForActivation() to be called")
}
}
func TestRoleAt_NextSlot(t *testing.T) {
v := &fakeValidator{}
ctx, cancel := context.WithCancel(context.Background())
slot := uint64(55)
ticker := make(chan uint64)
v.NextSlotRet = ticker
go func() {
ticker <- slot
cancel()
}()
run(ctx, v)
if !v.RoleAtCalled {
t.Fatalf("Expected RoleAt(%d) to be called", slot)
}
if v.RoleAtArg1 != slot {
t.Errorf("RoleAt called with the wrong arg. Want=%d, got=%d", slot, v.RoleAtArg1)
}
}
func TestAttests_NextSlot(t *testing.T) {
v := &fakeValidator{}
ctx, cancel := context.WithCancel(context.Background())
slot := uint64(55)
ticker := make(chan uint64)
v.NextSlotRet = ticker
v.RolesAtRet = []validatorRole{roleAttester}
go func() {
ticker <- slot
cancel()
}()
timer := time.NewTimer(200 * time.Millisecond)
run(ctx, v)
<-timer.C
if !v.AttestToBlockHeadCalled {
t.Fatalf("SubmitAttestation(%d) was not called", slot)
}
if v.AttestToBlockHeadArg1 != slot {
t.Errorf("SubmitAttestation was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1)
}
}
func TestProposes_NextSlot(t *testing.T) {
v := &fakeValidator{}
ctx, cancel := context.WithCancel(context.Background())
slot := uint64(55)
ticker := make(chan uint64)
v.NextSlotRet = ticker
v.RolesAtRet = []validatorRole{roleProposer}
go func() {
ticker <- slot
cancel()
}()
timer := time.NewTimer(200 * time.Millisecond)
run(ctx, v)
<-timer.C
if !v.ProposeBlockCalled {
t.Fatalf("ProposeBlock(%d) was not called", slot)
}
if v.ProposeBlockArg1 != slot {
t.Errorf("ProposeBlock was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1)
}
}
func TestBothProposesAndAttests_NextSlot(t *testing.T) {
v := &fakeValidator{}
ctx, cancel := context.WithCancel(context.Background())
slot := uint64(55)
ticker := make(chan uint64)
v.NextSlotRet = ticker
v.RolesAtRet = []validatorRole{roleAttester, roleProposer}
go func() {
ticker <- slot
cancel()
}()
timer := time.NewTimer(200 * time.Millisecond)
run(ctx, v)
<-timer.C
if !v.AttestToBlockHeadCalled {
t.Fatalf("SubmitAttestation(%d) was not called", slot)
}
if v.AttestToBlockHeadArg1 != slot {
t.Errorf("SubmitAttestation was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1)
}
if !v.ProposeBlockCalled {
t.Fatalf("ProposeBlock(%d) was not called", slot)
}
if v.ProposeBlockArg1 != slot {
t.Errorf("ProposeBlock was called with wrong arg. Want=%d, got=%d", slot, v.AttestToBlockHeadArg1)
}
}

View File

@@ -0,0 +1,249 @@
package streaming
import (
"context"
"strings"
"github.com/dgraph-io/ristretto"
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/grpcutils"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/validator/db"
"github.com/prysmaticlabs/prysm/validator/keymanager"
slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection"
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ocgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
var log = logrus.WithField("prefix", "validator")
// ValidatorService represents a service to manage the validator client
// routine.
type ValidatorService struct {
ctx context.Context
cancel context.CancelFunc
validator Validator
graffiti []byte
conn *grpc.ClientConn
endpoint string
withCert string
dataDir string
keyManager keymanager.KeyManager
logValidatorBalances bool
emitAccountMetrics bool
maxCallRecvMsgSize int
grpcRetries uint
grpcHeaders []string
protector slashingprotection.Protector
}
// Config for the validator service.
type Config struct {
Endpoint string
DataDir string
CertFlag string
GraffitiFlag string
KeyManager keymanager.KeyManager
LogValidatorBalances bool
EmitAccountMetrics bool
GrpcMaxCallRecvMsgSizeFlag int
GrpcRetriesFlag uint
GrpcHeadersFlag string
Protector slashingprotection.Protector
}
// NewValidatorService creates a new validator service for the service
// registry.
func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, error) {
ctx, cancel := context.WithCancel(ctx)
return &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: cfg.Endpoint,
withCert: cfg.CertFlag,
dataDir: cfg.DataDir,
graffiti: []byte(cfg.GraffitiFlag),
keyManager: cfg.KeyManager,
logValidatorBalances: cfg.LogValidatorBalances,
emitAccountMetrics: cfg.EmitAccountMetrics,
maxCallRecvMsgSize: cfg.GrpcMaxCallRecvMsgSizeFlag,
grpcRetries: cfg.GrpcRetriesFlag,
grpcHeaders: strings.Split(cfg.GrpcHeadersFlag, ","),
protector: cfg.Protector,
}, nil
}
// Start the validator service. Launches the main go routine for the validator
// client.
func (v *ValidatorService) Start() {
streamInterceptor := grpc.WithStreamInterceptor(middleware.ChainStreamClient(
grpc_opentracing.StreamClientInterceptor(),
grpc_prometheus.StreamClientInterceptor,
grpc_retry.StreamClientInterceptor(),
))
dialOpts := ConstructDialOptions(
v.maxCallRecvMsgSize, v.withCert, v.grpcHeaders, v.grpcRetries, streamInterceptor)
if dialOpts == nil {
return
}
conn, err := grpc.DialContext(v.ctx, v.endpoint, dialOpts...)
if err != nil {
log.Errorf("Could not dial endpoint: %s, %v", v.endpoint, err)
return
}
log.Debug("Successfully started gRPC connection")
pubkeys, err := v.keyManager.FetchValidatingKeys()
if err != nil {
log.Errorf("Could not get validating keys: %v", err)
return
}
valDB, err := db.NewKVStore(v.dataDir, pubkeys)
if err != nil {
log.Errorf("Could not initialize db: %v", err)
return
}
v.conn = conn
cache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: 1920, // number of keys to track.
MaxCost: 192, // maximum cost of cache, 1 item = 1 cost.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.Errorf("Could not initialize domain data cache: %v", err)
return
}
aggregatedSlotCommitteeIDCache, err := lru.New(int(params.BeaconConfig().MaxCommitteesPerSlot))
if err != nil {
log.Errorf("Could not initialize cache: %v", err)
return
}
v.validator = &validator{
db: valDB,
dutiesByEpoch: make(map[uint64][]*ethpb.DutiesResponse_Duty, 2), // 2 epochs worth of duties.
validatorClient: ethpb.NewBeaconNodeValidatorClient(v.conn),
beaconClient: ethpb.NewBeaconChainClient(v.conn),
node: ethpb.NewNodeClient(v.conn),
keyManager: v.keyManager,
graffiti: v.graffiti,
logValidatorBalances: v.logValidatorBalances,
emitAccountMetrics: v.emitAccountMetrics,
prevBalance: make(map[[48]byte]uint64),
attLogs: make(map[[32]byte]*attSubmitted),
domainDataCache: cache,
aggregatedSlotCommitteeIDCache: aggregatedSlotCommitteeIDCache,
protector: v.protector,
}
go run(v.ctx, v.validator)
}
// Stop the validator service.
func (v *ValidatorService) Stop() error {
v.cancel()
log.Info("Stopping service")
if v.conn != nil {
return v.conn.Close()
}
return nil
}
// Status of the validator service's health.
func (v *ValidatorService) Status() error {
if v.conn == nil {
return errors.New("no connection to beacon RPC")
}
return nil
}
// signObject signs a generic object, with protection if available.
func (v *validator) signObject(pubKey [48]byte, object interface{}, domain []byte) (*bls.Signature, error) {
if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported {
root, err := ssz.HashTreeRoot(object)
if err != nil {
return nil, err
}
return protectingKeymanager.SignGeneric(pubKey, root, bytesutil.ToBytes32(domain))
}
root, err := helpers.ComputeSigningRoot(object, domain)
if err != nil {
return nil, err
}
return v.keyManager.Sign(pubKey, root)
}
// ConstructDialOptions constructs a list of gRPC dial options.
func ConstructDialOptions(
maxCallRecvMsgSize int,
withCert string,
grpcHeaders []string,
grpcRetries uint,
extraOpts ...grpc.DialOption,
) []grpc.DialOption {
var transportSecurity grpc.DialOption
if withCert != "" {
creds, err := credentials.NewClientTLSFromFile(withCert, "")
if err != nil {
log.Errorf("Could not get valid credentials: %v", err)
return nil
}
transportSecurity = grpc.WithTransportCredentials(creds)
} else {
transportSecurity = grpc.WithInsecure()
log.Warn("You are using an insecure gRPC connection! Please provide a certificate and key to use a secure connection.")
}
if maxCallRecvMsgSize == 0 {
maxCallRecvMsgSize = 10 * 5 << 20 // Default 50MB.
}
md := make(metadata.MD)
for _, hdr := range grpcHeaders {
if hdr != "" {
ss := strings.Split(hdr, "=")
if len(ss) != 2 {
log.Warnf("Incorrect gRPC header flag format. Skipping %v", hdr)
continue
}
md.Set(ss[0], ss[1])
}
}
dialOpts := []grpc.DialOption{
transportSecurity,
grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(maxCallRecvMsgSize),
grpc_retry.WithMax(grpcRetries),
grpc.Header(&md),
),
grpc.WithStatsHandler(&ocgrpc.ClientHandler{}),
grpc.WithUnaryInterceptor(middleware.ChainUnaryClient(
grpc_opentracing.UnaryClientInterceptor(),
grpc_prometheus.UnaryClientInterceptor,
grpc_retry.UnaryClientInterceptor(),
grpcutils.LogGRPCRequests,
)),
}
for _, opt := range extraOpts {
dialOpts = append(dialOpts, opt)
}
return dialOpts
}
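// A minimal usage sketch for ConstructDialOptions (illustrative only; the
// endpoint, message size, retry count, and ctx below are assumed values, not
// taken from this change):
//
//	opts := ConstructDialOptions(
//		10*5<<20, // 50MB max gRPC receive size.
//		"",       // No TLS cert, falls back to an insecure connection.
//		nil,      // No extra gRPC headers.
//		5,        // Retry failed calls up to five times.
//	)
//	if opts != nil {
//		conn, err := grpc.DialContext(ctx, "localhost:4000", opts...)
//		// Handle err, then construct clients from conn as in Start() above.
//	}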

View File

@@ -0,0 +1,132 @@
package streaming
import (
"context"
"os"
"strings"
"testing"
"time"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/keystore"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/keymanager"
logTest "github.com/sirupsen/logrus/hooks/test"
)
var _ = shared.Service(&ValidatorService{})
var validatorKey *keystore.Key
var validatorPubKey [48]byte
var keyMap map[[48]byte]*keystore.Key
var keyMapThreeValidators map[[48]byte]*keystore.Key
var testKeyManager keymanager.KeyManager
var testKeyManagerThreeValidators keymanager.KeyManager
func keySetup() {
keyMap = make(map[[48]byte]*keystore.Key)
keyMapThreeValidators = make(map[[48]byte]*keystore.Key)
var err error
validatorKey, err = keystore.NewKey()
if err != nil {
log.WithError(err).Debug("Cannot create key")
}
copy(validatorPubKey[:], validatorKey.PublicKey.Marshal())
keyMap[validatorPubKey] = validatorKey
sks := make([]*bls.SecretKey, 1)
sks[0] = validatorKey.SecretKey
testKeyManager = keymanager.NewDirect(sks)
sks = make([]*bls.SecretKey, 3)
for i := 0; i < 3; i++ {
vKey, err := keystore.NewKey()
if err != nil {
log.WithError(err).Debug("Cannot create key")
}
var pubKey [48]byte
copy(pubKey[:], vKey.PublicKey.Marshal())
keyMapThreeValidators[pubKey] = vKey
sks[i] = vKey.SecretKey
}
testKeyManagerThreeValidators = keymanager.NewDirect(sks)
}
func TestMain(m *testing.M) {
dir := testutil.TempDir() + "/keystore1"
defer func() {
if err := os.RemoveAll(dir); err != nil {
log.WithError(err).Debug("Cannot remove keystore folder")
}
}()
if err := accounts.NewValidatorAccount(dir, "1234"); err != nil {
log.WithError(err).Debug("Cannot create validator account")
}
keySetup()
os.Exit(m.Run())
}
func TestStop_CancelsContext(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
vs := &ValidatorService{
ctx: ctx,
cancel: cancel,
}
if err := vs.Stop(); err != nil {
t.Error(err)
}
select {
case <-time.After(1 * time.Second):
t.Error("Context not canceled within 1s")
case <-vs.ctx.Done():
}
}
func TestLifecycle(t *testing.T) {
hook := logTest.NewGlobal()
// Use canceled context so that the run function exits immediately.
ctx, cancel := context.WithCancel(context.Background())
cancel()
validatorService := &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: "merkle tries",
withCert: "alice.crt",
keyManager: keymanager.NewDirect(nil),
}
validatorService.Start()
if err := validatorService.Stop(); err != nil {
t.Fatalf("Could not stop service: %v", err)
}
testutil.AssertLogsContain(t, hook, "Stopping service")
}
func TestLifecycle_Insecure(t *testing.T) {
hook := logTest.NewGlobal()
// Use canceled context so that the run function exits immediately.
ctx, cancel := context.WithCancel(context.Background())
cancel()
validatorService := &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: "merkle tries",
keyManager: keymanager.NewDirect(nil),
}
validatorService.Start()
testutil.AssertLogsContain(t, hook, "You are using an insecure gRPC connection")
if err := validatorService.Stop(); err != nil {
t.Fatalf("Could not stop service: %v", err)
}
testutil.AssertLogsContain(t, hook, "Stopping service")
}
func TestStatus_NoConnectionError(t *testing.T) {
validatorService := &ValidatorService{}
if err := validatorService.Status(); !strings.Contains(err.Error(), "no connection") {
t.Errorf("Expected status check to fail if no connection is found, received: %v", err)
}
}

View File

@@ -0,0 +1,462 @@
// Package streaming represents a gRPC stream-based implementation
// of an eth2 validator client.
package streaming
import (
"context"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
"github.com/dgraph-io/ristretto"
"github.com/gogo/protobuf/proto"
ptypes "github.com/gogo/protobuf/types"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
slashpb "github.com/prysmaticlabs/prysm/proto/slashing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/prysmaticlabs/prysm/validator/db"
"github.com/prysmaticlabs/prysm/validator/keymanager"
slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
type validatorRole int8
const (
roleUnknown validatorRole = iota
roleAttester
roleProposer
roleAggregator
)
type validator struct {
genesisTime uint64
ticker *slotutil.SlotTicker
db *db.Store
dutiesLock sync.RWMutex
dutiesByEpoch map[uint64][]*ethpb.DutiesResponse_Duty
validatorClient ethpb.BeaconNodeValidatorClient
beaconClient ethpb.BeaconChainClient
graffiti []byte
node ethpb.NodeClient
keyManager keymanager.KeyManager
prevBalance map[[48]byte]uint64
logValidatorBalances bool
emitAccountMetrics bool
attLogs map[[32]byte]*attSubmitted
attLogsLock sync.Mutex
domainDataLock sync.Mutex
domainDataCache *ristretto.Cache
aggregatedSlotCommitteeIDCache *lru.Cache
aggregatedSlotCommitteeIDCacheLock sync.Mutex
attesterHistoryByPubKey map[[48]byte]*slashpb.AttestationHistory
attesterHistoryByPubKeyLock sync.RWMutex
protector slashingprotection.Protector
}
// Done cleans up the validator.
func (v *validator) Done() {
v.ticker.Done()
}
// WaitForChainStart checks whether the beacon node has started its runtime. That is,
// it calls the beacon node, which inspects the ETH 1.0 deposit contract logs to check
// whether the ChainStart log has been emitted. If so, it starts a ticker based on the
// ChainStart unix timestamp which will be used to keep track of time within the
// validator client.
func (v *validator) WaitForChainStart(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.WaitForChainStart")
defer span.End()
// First, check if the beacon chain has started.
stream, err := v.validatorClient.WaitForChainStart(ctx, &ptypes.Empty{})
if err != nil {
return errors.Wrap(err, "could not setup beacon chain ChainStart streaming client")
}
for {
log.Info("Waiting for beacon chain start log from the ETH 1.0 deposit contract")
chainStartRes, err := stream.Recv()
// If the stream is closed, we stop the loop.
if err == io.EOF {
break
}
// If context is canceled we stop the loop.
if ctx.Err() == context.Canceled {
return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop")
}
if err != nil {
return errors.Wrap(err, "could not receive ChainStart from stream")
}
v.genesisTime = chainStartRes.GenesisTime
break
}
// Once the ChainStart log is received, we update the genesis time of the validator client
// and begin a slot ticker used to track the current slot the beacon node is in.
v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot)
log.WithField("genesisTime", time.Unix(int64(v.genesisTime), 0)).Info("Beacon chain genesis")
return nil
}
// WaitForSync checks whether the beacon node has synced to the latest head.
func (v *validator) WaitForSync(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.WaitForSync")
defer span.End()
s, err := v.node.GetSyncStatus(ctx, &ptypes.Empty{})
if err != nil {
return errors.Wrap(err, "could not get sync status")
}
if !s.Syncing {
return nil
}
for {
select {
// Poll every half slot.
case <-time.After(slotutil.DivideSlotBy(2 /* twice per slot */)):
s, err := v.node.GetSyncStatus(ctx, &ptypes.Empty{})
if err != nil {
return errors.Wrap(err, "could not get sync status")
}
if !s.Syncing {
return nil
}
log.Info("Waiting for beacon node to sync to latest chain head")
case <-ctx.Done():
return errors.New("context has been canceled, exiting goroutine")
}
}
}
// WaitForSynced opens a stream with the beacon chain node so it can be informed of when the beacon node is
// fully synced and ready to communicate with the validator.
func (v *validator) WaitForSynced(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.WaitForSynced")
defer span.End()
// First, check if the beacon chain has started.
stream, err := v.validatorClient.WaitForSynced(ctx, &ptypes.Empty{})
if err != nil {
return errors.Wrap(err, "could not setup beacon chain Synced streaming client")
}
for {
log.Info("Waiting for chainstart to occur and the beacon node to be fully synced")
syncedRes, err := stream.Recv()
// If the stream is closed, we stop the loop.
if err == io.EOF {
break
}
// If context is canceled we stop the loop.
if ctx.Err() == context.Canceled {
return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop")
}
if err != nil {
return errors.Wrap(err, "could not receive Synced from stream")
}
v.genesisTime = syncedRes.GenesisTime
break
}
// Once the Synced log is received, we update the genesis time of the validator client
// and begin a slot ticker used to track the current slot the beacon node is in.
v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot)
log.WithField("genesisTime", time.Unix(int64(v.genesisTime), 0)).Info("Chain has started and the beacon node is synced")
return nil
}
// WaitForActivation checks whether the validator pubkey is in the active
// validator set. If not, this operation will block until an activation message is
// received.
func (v *validator) WaitForActivation(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.WaitForActivation")
defer span.End()
validatingKeys, err := v.keyManager.FetchValidatingKeys()
if err != nil {
return errors.Wrap(err, "could not fetch validating keys")
}
req := &ethpb.ValidatorActivationRequest{
PublicKeys: bytesutil.FromBytes48Array(validatingKeys),
}
stream, err := v.validatorClient.WaitForActivation(ctx, req)
if err != nil {
return errors.Wrap(err, "could not setup validator WaitForActivation streaming client")
}
for {
res, err := stream.Recv()
// If the stream is closed, we stop the loop.
if err == io.EOF {
break
}
// If context is canceled we stop the loop.
if ctx.Err() == context.Canceled {
return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop")
}
if err != nil {
return errors.Wrap(err, "could not receive validator activation from stream")
}
valActivated := v.checkAndLogValidatorStatus(res.Statuses)
if valActivated {
for _, statusResp := range res.Statuses {
if statusResp.Status.Status != ethpb.ValidatorStatus_ACTIVE {
continue
}
log.WithFields(logrus.Fields{
"publicKey": fmt.Sprintf("%#x", bytesutil.Trunc(statusResp.PublicKey)),
"index": statusResp.Index,
}).Info("Validator activated")
}
break
}
}
v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot)
return nil
}
func (v *validator) checkAndLogValidatorStatus(validatorStatuses []*ethpb.ValidatorActivationResponse_Status) bool {
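// ^uint64(0) is the maximum uint64 value, used to mark a validator index that has not been assigned yet.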
nonexistentIndex := ^uint64(0)
var validatorActivated bool
for _, status := range validatorStatuses {
fields := logrus.Fields{
"pubKey": fmt.Sprintf("%#x", bytesutil.Trunc(status.PublicKey[:])),
"status": status.Status.Status.String(),
}
if status.Index != nonexistentIndex {
fields["index"] = status.Index
}
log := log.WithFields(fields)
if v.emitAccountMetrics {
fmtKey := fmt.Sprintf("%#x", status.PublicKey)
metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(status.Status.Status))
}
switch status.Status.Status {
case ethpb.ValidatorStatus_UNKNOWN_STATUS:
log.Info("Waiting for deposit to be observed by beacon node")
case ethpb.ValidatorStatus_DEPOSITED:
if status.Status.DepositInclusionSlot != 0 {
log.WithFields(logrus.Fields{
"expectedInclusionSlot": status.Status.DepositInclusionSlot,
"eth1DepositBlockNumber": status.Status.Eth1DepositBlockNumber,
}).Info("Deposit for validator received but not processed into the beacon state")
} else {
log.WithField(
"positionInActivationQueue", status.Status.PositionInActivationQueue,
).Info("Deposit processed, entering activation queue after finalization")
}
case ethpb.ValidatorStatus_PENDING:
if status.Status.ActivationEpoch == params.BeaconConfig().FarFutureEpoch {
log.WithFields(logrus.Fields{
"positionInActivationQueue": status.Status.PositionInActivationQueue,
}).Info("Waiting to be assigned activation epoch")
} else {
log.WithFields(logrus.Fields{
"activationEpoch": status.Status.ActivationEpoch,
}).Info("Waiting for activation")
}
case ethpb.ValidatorStatus_ACTIVE:
validatorActivated = true
case ethpb.ValidatorStatus_EXITED:
log.Info("Validator exited")
default:
log.WithFields(logrus.Fields{
"activationEpoch": status.Status.ActivationEpoch,
}).Info("Validator status")
}
}
return validatorActivated
}
// NextSlot emits the next slot number at the start time of that slot.
func (v *validator) NextSlot() <-chan uint64 {
return v.ticker.C()
}
// SlotDeadline returns the start time of the next slot, which serves as the deadline for the given slot's duties.
func (v *validator) SlotDeadline(slot uint64) time.Time {
secs := (slot + 1) * params.BeaconConfig().SecondsPerSlot
return time.Unix(int64(v.genesisTime), 0 /*ns*/).Add(time.Duration(secs) * time.Second)
}
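// For example, under mainnet config (SecondsPerSlot = 12), the deadline for
// slot 10 is genesisTime + 11*12s, i.e. the start of slot 11.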
// UpdateProtections goes through the duties of the given slot and fetches the required
// attesting history, assigning it to the validator.
func (v *validator) UpdateProtections(ctx context.Context, slot uint64) error {
epoch := slot / params.BeaconConfig().SlotsPerEpoch
v.dutiesLock.RLock()
duties, ok := v.dutiesByEpoch[epoch]
if !ok {
v.dutiesLock.RUnlock()
log.Debugf("No assigned duties yet for epoch %d", epoch)
return nil
}
v.dutiesLock.RUnlock()
attestingPubKeys := make([][48]byte, 0, len(duties))
for _, dt := range duties {
if dt == nil {
continue
}
if dt.AttesterSlot == slot {
attestingPubKeys = append(attestingPubKeys, bytesutil.ToBytes48(dt.PublicKey))
}
}
attHistoryByPubKey, err := v.db.AttestationHistoryForPubKeys(ctx, attestingPubKeys)
if err != nil {
return errors.Wrap(err, "could not get attester history")
}
v.attesterHistoryByPubKey = attHistoryByPubKey
return nil
}
// SaveProtections saves the attestation information currently in validator state.
func (v *validator) SaveProtections(ctx context.Context) error {
if err := v.db.SaveAttestationHistoryForPubKeys(ctx, v.attesterHistoryByPubKey); err != nil {
return errors.Wrap(err, "could not save attester history to DB")
}
v.attesterHistoryByPubKey = make(map[[48]byte]*slashpb.AttestationHistory)
return nil
}
// isAggregator checks if a validator is an aggregator of a given slot, it uses the selection algorithm outlined in:
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#aggregation-selection
func (v *validator) isAggregator(ctx context.Context, committee []uint64, slot uint64, pubKey [48]byte) (bool, error) {
modulo := uint64(1)
if len(committee)/int(params.BeaconConfig().TargetAggregatorsPerCommittee) > 1 {
modulo = uint64(len(committee)) / params.BeaconConfig().TargetAggregatorsPerCommittee
}
slotSig, err := v.signSlot(ctx, pubKey, slot)
if err != nil {
return false, err
}
b := hashutil.Hash(slotSig)
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
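// Worked example, assuming mainnet config (TargetAggregatorsPerCommittee = 16):
// for a committee of 128 validators, modulo = 128/16 = 8, so each validator's
// hashed slot signature selects it with probability 1/8, yielding ~16 aggregators
// per committee in expectation. Committees smaller than twice the target keep
// modulo = 1, making every member an aggregator.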
// UpdateDomainDataCaches prefetches all of the possible domain data. Domain data can
// change when the fork version changes, which can happen once per epoch. Although a
// fork version change is very rare, a validator should check this data every epoch to
// be sure it is participating on the correct fork version.
func (v *validator) UpdateDomainDataCaches(ctx context.Context, slot uint64) {
if !featureconfig.Get().EnableDomainDataCache {
return
}
for _, d := range [][]byte{
params.BeaconConfig().DomainRandao[:],
params.BeaconConfig().DomainBeaconAttester[:],
params.BeaconConfig().DomainBeaconProposer[:],
params.BeaconConfig().DomainSelectionProof[:],
params.BeaconConfig().DomainAggregateAndProof[:],
} {
_, err := v.domainData(ctx, helpers.SlotToEpoch(slot), d)
if err != nil {
log.WithError(err).Errorf("Failed to update domain data for domain %v", d)
}
}
}
// CurrentSlot returns the current slot based on the chain genesis time.
func (v *validator) CurrentSlot() uint64 {
var currentSlot uint64
genesisTime := time.Unix(int64(v.genesisTime), 0)
if genesisTime.Before(roughtime.Now()) {
currentSlot = slotutil.SlotsSinceGenesis(genesisTime)
}
return currentSlot
}
func (v *validator) domainData(ctx context.Context, epoch uint64, domain []byte) (*ethpb.DomainResponse, error) {
v.domainDataLock.Lock()
defer v.domainDataLock.Unlock()
req := &ethpb.DomainRequest{
Epoch: epoch,
Domain: domain,
}
key := strings.Join([]string{strconv.FormatUint(req.Epoch, 10), hex.EncodeToString(req.Domain)}, ",")
if featureconfig.Get().EnableDomainDataCache {
if val, ok := v.domainDataCache.Get(key); ok {
return proto.Clone(val.(proto.Message)).(*ethpb.DomainResponse), nil
}
}
res, err := v.validatorClient.DomainData(ctx, req)
if err != nil {
return nil, err
}
if featureconfig.Get().EnableDomainDataCache {
v.domainDataCache.Set(key, proto.Clone(res), 1)
}
return res, nil
}
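// The cache key is the decimal epoch joined to the hex-encoded domain with a comma,
// e.g. epoch 3 with domain 0x01000000 yields the key "3,01000000".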
func (v *validator) logDuties(slot uint64, duties []*ethpb.DutiesResponse_Duty) {
attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
for i := range attesterKeys {
attesterKeys[i] = make([]string, 0)
}
proposerKeys := make([]string, params.BeaconConfig().SlotsPerEpoch)
slotOffset := helpers.StartSlot(helpers.SlotToEpoch(slot))
for _, duty := range duties {
if v.emitAccountMetrics {
fmtKey := fmt.Sprintf("%#x", duty.PublicKey)
metrics.ValidatorStatusesGaugeVec.WithLabelValues(fmtKey).Set(float64(duty.Status))
}
// Only interested in validators who are attesting/proposing.
// Note that SLASHING validators will have duties but their results are ignored by the network so we don't bother with them.
if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING {
continue
}
validatorKey := fmt.Sprintf("%#x", bytesutil.Trunc(duty.PublicKey))
attesterIndex := duty.AttesterSlot - slotOffset
if attesterIndex >= params.BeaconConfig().SlotsPerEpoch {
log.WithField("duty", duty).Warn("Invalid attester slot")
} else {
attesterKeys[duty.AttesterSlot-slotOffset] = append(attesterKeys[duty.AttesterSlot-slotOffset], validatorKey)
}
for _, proposerSlot := range duty.ProposerSlots {
proposerIndex := proposerSlot - slotOffset
if proposerIndex >= params.BeaconConfig().SlotsPerEpoch {
log.WithField("duty", duty).Warn("Invalid proposer slot")
} else {
proposerKeys[proposerIndex] = validatorKey
}
}
}
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
if len(attesterKeys[i]) > 0 {
log.WithField("slot", slotOffset+i).WithField("attesters", len(attesterKeys[i])).WithField("pubKeys", attesterKeys[i]).Info("Attestation schedule")
}
if proposerKeys[i] != "" {
log.WithField("slot", slotOffset+i).WithField("pubKey", proposerKeys[i]).Info("Proposal schedule")
}
}
}
// This constructs a validator subscription key, used to track which subnet
// subscription requests are already pending.
func validatorSubscribeKey(slot uint64, committeeID uint64) [64]byte {
return bytesutil.ToBytes64(append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...))
}

View File

@@ -0,0 +1,166 @@
package streaming
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
)
// SubmitAggregateAndProof submits the validator's signed slot signature to the beacon node
// via gRPC. Beacon node will verify the slot signature and determine if the validator is also
// an aggregator. If yes, then beacon node will broadcast aggregated signature and
// proof on the validator's behalf.
func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pubKey [48]byte) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitAggregateAndProof")
defer span.End()
span.AddAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
fmtKey := fmt.Sprintf("%#x", pubKey[:])
epoch := slot / params.BeaconConfig().SlotsPerEpoch
duty, err := v.duty(pubKey, epoch)
if err != nil {
log.Errorf("Could not fetch validator assignment: %v", err)
if v.emitAccountMetrics {
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
// Avoid sending the beacon node duplicate aggregation requests.
k := validatorSubscribeKey(slot, duty.CommitteeIndex)
v.aggregatedSlotCommitteeIDCacheLock.Lock()
defer v.aggregatedSlotCommitteeIDCacheLock.Unlock()
if v.aggregatedSlotCommitteeIDCache.Contains(k) {
return
}
v.aggregatedSlotCommitteeIDCache.Add(k, true)
slotSig, err := v.signSlot(ctx, pubKey, slot)
if err != nil {
log.Errorf("Could not sign slot: %v", err)
if v.emitAccountMetrics {
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
// As specified in the spec, an aggregator should wait until two-thirds of the way through the slot
// to broadcast the best aggregate to the global aggregate channel.
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#broadcast-aggregate
v.waitToSlotTwoThirds(ctx, slot)
res, err := v.validatorClient.SubmitAggregateSelectionProof(ctx, &ethpb.AggregateSelectionRequest{
Slot: slot,
CommitteeIndex: duty.CommitteeIndex,
PublicKey: pubKey[:],
SlotSignature: slotSig,
})
if err != nil {
log.WithField("slot", slot).Errorf("Could not submit slot signature to beacon node: %v", err)
if v.emitAccountMetrics {
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
sig, err := v.aggregateAndProofSig(ctx, pubKey, res.AggregateAndProof)
if err != nil {
log.Errorf("Could not sign aggregate and proof: %v", err)
}
_, err = v.validatorClient.SubmitSignedAggregateSelectionProof(ctx, &ethpb.SignedAggregateSubmitRequest{
SignedAggregateAndProof: &ethpb.SignedAggregateAttestationAndProof{
Message: res.AggregateAndProof,
Signature: sig,
},
})
if err != nil {
log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err)
if v.emitAccountMetrics {
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if err := v.addIndicesToLog(duty); err != nil {
log.Errorf("Could not add aggregator indices to logs: %v", err)
if v.emitAccountMetrics {
metrics.ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if v.emitAccountMetrics {
metrics.ValidatorAggSuccessVec.WithLabelValues(fmtKey).Inc()
}
}
// This implements selection logic outlined in:
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#aggregation-selection
func (v *validator) signSlot(ctx context.Context, pubKey [48]byte, slot uint64) ([]byte, error) {
domain, err := v.domainData(ctx, helpers.SlotToEpoch(slot), params.BeaconConfig().DomainSelectionProof[:])
if err != nil {
return nil, err
}
sig, err := v.signObject(pubKey, slot, domain.SignatureDomain)
if err != nil {
return nil, errors.Wrap(err, "Failed to sign slot")
}
return sig.Marshal(), nil
}
// waitToSlotTwoThirds waits until two-thirds of the way through the current slot
// so that any attestations from this slot have time to reach the beacon node
// before creating the aggregated attestation.
func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot uint64) {
_, span := trace.StartSpan(ctx, "validator.waitToSlotTwoThirds")
defer span.End()
oneThird := slotutil.DivideSlotBy(3 /* one third of slot duration */)
twoThird := oneThird + oneThird
delay := twoThird
startTime := slotutil.SlotStartTime(v.genesisTime, slot)
finalTime := startTime.Add(delay)
time.Sleep(roughtime.Until(finalTime))
}
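// Under mainnet config (SecondsPerSlot = 12), oneThird is 4s, so this sleeps
// until 8s past the start of the slot before requesting the aggregate.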
// This returns the validator's signature over the aggregate and
// proof object.
func (v *validator) aggregateAndProofSig(ctx context.Context, pubKey [48]byte, agg *ethpb.AggregateAttestationAndProof) ([]byte, error) {
d, err := v.domainData(ctx, helpers.SlotToEpoch(agg.Aggregate.Data.Slot), params.BeaconConfig().DomainAggregateAndProof[:])
if err != nil {
return nil, err
}
sig, err := v.signObject(pubKey, agg, d.SignatureDomain)
if err != nil {
return nil, err
}
return sig.Marshal(), nil
}
func (v *validator) addIndicesToLog(duty *ethpb.DutiesResponse_Duty) error {
v.attLogsLock.Lock()
defer v.attLogsLock.Unlock()
for _, log := range v.attLogs {
if duty.CommitteeIndex == log.data.CommitteeIndex {
log.aggregatorIndices = append(log.aggregatorIndices, duty.ValidatorIndex)
}
}
return nil
}

View File

@@ -0,0 +1,107 @@
package streaming
import (
"context"
"testing"
"github.com/golang/mock/gomock"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestSubmitAggregateAndProof_GetDutiesRequestFailure(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, finish := setup(t)
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{}
defer finish()
validator.SubmitAggregateAndProof(context.Background(), 0, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Could not fetch validator assignment")
}
func TestSubmitAggregateAndProof_Ok(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
},
}
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().SubmitAggregateSelectionProof(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AggregateSelectionRequest{}),
).Return(&ethpb.AggregateSelectionResponse{
AggregateAndProof: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 0,
Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{}},
SelectionProof: nil,
},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().SubmitSignedAggregateSelectionProof(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedAggregateSubmitRequest{}),
).Return(&ethpb.SignedAggregateSubmitResponse{}, nil)
validator.SubmitAggregateAndProof(context.Background(), 0, validatorPubKey)
}
func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) {
validator, _, finish := setup(t)
defer finish()
currentTime := roughtime.Now()
numOfSlots := uint64(4)
validator.genesisTime = uint64(currentTime.Unix()) - (numOfSlots * params.BeaconConfig().SecondsPerSlot)
oneThird := slotutil.DivideSlotBy(3 /* one third of slot duration */)
timeToSleep := oneThird + oneThird
twoThirdTime := currentTime.Add(timeToSleep)
validator.waitToSlotTwoThirds(context.Background(), numOfSlots)
currentTime = roughtime.Now()
if currentTime.Unix() != twoThirdTime.Unix() {
t.Errorf("Wanted %v time for slot two third but got %v", twoThirdTime, currentTime)
}
}
func TestAggregateAndProofSignature_CanSignValidSignature(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
&ethpb.DomainRequest{Epoch: 0, Domain: params.BeaconConfig().DomainAggregateAndProof[:]},
).Return(&ethpb.DomainResponse{}, nil /*err*/)
agg := &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 0,
Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{}},
SelectionProof: nil,
}
sig, err := validator.aggregateAndProofSig(context.Background(), validatorPubKey, agg)
if err != nil {
t.Fatal(err)
}
if _, err := bls.SignatureFromBytes(sig); err != nil {
t.Fatal(err)
}
}

View File

@@ -0,0 +1,290 @@
package streaming
import (
"context"
"fmt"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
slashpb "github.com/prysmaticlabs/prysm/proto/slashing"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// SubmitAttestation completes the validator client's attester responsibility at a given slot.
// It fetches the latest beacon block head along with the latest canonical beacon state
// information in order to sign the block and include information about the validator's
// participation in voting on the block.
func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [48]byte) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitAttestation")
defer span.End()
span.AddAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
fmtKey := fmt.Sprintf("%#x", pubKey[:])
log := log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:]))).WithField("slot", slot)
epoch := slot / params.BeaconConfig().SlotsPerEpoch
duty, err := v.duty(pubKey, epoch)
if err != nil {
log.WithError(err).Error("Could not fetch validator assignment")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if len(duty.Committee) == 0 {
log.Debug("Empty committee for validator duty, not attesting")
return
}
v.attesterHistoryByPubKeyLock.RLock()
attesterHistory := v.attesterHistoryByPubKey[pubKey]
v.attesterHistoryByPubKeyLock.RUnlock()
v.waitToSlotOneThird(ctx, slot)
req := &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: duty.CommitteeIndex,
}
data, err := v.validatorClient.GetAttestationData(ctx, req)
if err != nil {
log.WithError(err).Error("Could not request attestation to sign at slot")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if featureconfig.Get().ProtectAttester {
if isNewAttSlashable(attesterHistory, data.Source.Epoch, data.Target.Epoch) {
log.WithFields(logrus.Fields{
"sourceEpoch": data.Source.Epoch,
"targetEpoch": data.Target.Epoch,
}).Error("Attempted to make a slashable attestation, rejected")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
}
sig, err := v.signAtt(ctx, pubKey, data)
if err != nil {
log.WithError(err).Error("Could not sign attestation")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
var indexInCommittee uint64
var found bool
for i, vID := range duty.Committee {
if vID == duty.ValidatorIndex {
indexInCommittee = uint64(i)
found = true
break
}
}
if !found {
log.Errorf("Validator ID %d not found in committee of %v", duty.ValidatorIndex, duty.Committee)
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
aggregationBitfield := bitfield.NewBitlist(uint64(len(duty.Committee)))
aggregationBitfield.SetBitAt(indexInCommittee, true)
attestation := &ethpb.Attestation{
Data: data,
AggregationBits: aggregationBitfield,
Signature: sig,
}
if featureconfig.Get().SlasherProtection && v.protector != nil {
indexedAtt := &ethpb.IndexedAttestation{
AttestingIndices: []uint64{duty.ValidatorIndex},
Data: data,
Signature: sig,
}
if !v.protector.VerifyAttestation(ctx, indexedAtt) {
log.WithFields(logrus.Fields{
"sourceEpoch": data.Source.Epoch,
"targetEpoch": data.Target.Epoch,
}).Error("Attempted to make a slashable attestation, rejected by external slasher service")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVecSlasher.WithLabelValues(fmtKey).Inc()
}
return
}
}
attResp, err := v.validatorClient.ProposeAttestation(ctx, attestation)
if err != nil {
log.WithError(err).Error("Could not submit attestation to beacon node")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if err := v.saveAttesterIndexToData(data, duty.ValidatorIndex); err != nil {
log.WithError(err).Error("Could not save validator index for logging")
if v.emitAccountMetrics {
metrics.ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if featureconfig.Get().ProtectAttester {
attesterHistory = markAttestationForTargetEpoch(attesterHistory, data.Source.Epoch, data.Target.Epoch)
v.attesterHistoryByPubKeyLock.Lock()
v.attesterHistoryByPubKey[pubKey] = attesterHistory
v.attesterHistoryByPubKeyLock.Unlock()
}
if v.emitAccountMetrics {
metrics.ValidatorAttestSuccessVec.WithLabelValues(fmtKey).Inc()
}
span.AddAttributes(
trace.Int64Attribute("slot", int64(slot)),
trace.StringAttribute("attestationHash", fmt.Sprintf("%#x", attResp.AttestationDataRoot)),
trace.Int64Attribute("committeeIndex", int64(data.CommitteeIndex)),
trace.StringAttribute("blockRoot", fmt.Sprintf("%#x", data.BeaconBlockRoot)),
trace.Int64Attribute("justifiedEpoch", int64(data.Source.Epoch)),
trace.Int64Attribute("targetEpoch", int64(data.Target.Epoch)),
trace.StringAttribute("bitfield", fmt.Sprintf("%#x", aggregationBitfield)),
)
}
// Given validator's public key, this returns the signature of an attestation data.
func (v *validator) signAtt(ctx context.Context, pubKey [48]byte, data *ethpb.AttestationData) ([]byte, error) {
domain, err := v.domainData(ctx, data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester[:])
if err != nil {
return nil, err
}
root, err := helpers.ComputeSigningRoot(data, domain.SignatureDomain)
if err != nil {
return nil, err
}
var sig *bls.Signature
if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported {
sig, err = protectingKeymanager.SignAttestation(pubKey, bytesutil.ToBytes32(domain.SignatureDomain), data)
} else {
sig, err = v.keyManager.Sign(pubKey, root)
}
if err != nil {
return nil, err
}
return sig.Marshal(), nil
}
// For logging, this saves the last submitted attester index to its attestation data. The
// purpose is to keep attesting logs readable when multiple validator keys are run in a
// single client.
func (v *validator) saveAttesterIndexToData(data *ethpb.AttestationData, index uint64) error {
v.attLogsLock.Lock()
defer v.attLogsLock.Unlock()
h, err := hashutil.HashProto(data)
if err != nil {
return err
}
if v.attLogs[h] == nil {
v.attLogs[h] = &attSubmitted{data, []uint64{}, []uint64{}}
}
v.attLogs[h] = &attSubmitted{data, append(v.attLogs[h].attesterIndices, index), []uint64{}}
return nil
}
// isNewAttSlashable uses the attestation history to determine if an attestation of sourceEpoch
// and targetEpoch would be slashable. It can detect double, surrounding, and surrounded votes.
func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch uint64, targetEpoch uint64) bool {
farFuture := params.BeaconConfig().FarFutureEpoch
wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
// If the target epoch was previously pruned from the history, return false.
if int(targetEpoch) <= int(history.LatestEpochWritten)-int(wsPeriod) {
return false
}
// Check if there has already been a vote for this target epoch.
if safeTargetToSource(history, targetEpoch) != farFuture {
return true
}
// Check if the new attestation would be surrounding another attestation.
for i := sourceEpoch; i <= targetEpoch; i++ {
// Unattested for epochs are marked as FAR_FUTURE_EPOCH.
if safeTargetToSource(history, i) == farFuture {
continue
}
if history.TargetToSource[i%wsPeriod] > sourceEpoch {
return true
}
}
// Check if the new attestation is being surrounded.
for i := targetEpoch; i <= history.LatestEpochWritten; i++ {
if safeTargetToSource(history, i) < sourceEpoch {
return true
}
}
return false
}
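// Worked example (illustrative): assume history.LatestEpochWritten == 5,
// TargetToSource[5] == 2 (a prior attestation with source 2, target 5), and all
// other entries are FAR_FUTURE_EPOCH. Then:
//   - (source 2, target 5) is slashable: target 5 was already voted on (double vote).
//   - (source 1, target 6) is slashable: it surrounds the prior vote, since
//     TargetToSource[5] == 2 > 1.
//   - (source 3, target 4) is slashable: it is surrounded by the prior vote, since
//     TargetToSource[5] == 2 < 3.
//   - (source 5, target 6) is not slashable.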
// markAttestationForTargetEpoch returns the modified attestation history with the passed-in epochs marked
// as attested for. This is done to prevent the validator client from signing any slashable attestations.
func markAttestationForTargetEpoch(history *slashpb.AttestationHistory, sourceEpoch uint64, targetEpoch uint64) *slashpb.AttestationHistory {
wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
if targetEpoch > history.LatestEpochWritten {
// If the target epoch to mark is ahead of latest written epoch, override the old targets and mark the requested epoch.
// Limit the overwriting to one weak subjectivity period as further is not needed.
maxToWrite := history.LatestEpochWritten + wsPeriod
for i := history.LatestEpochWritten + 1; i < targetEpoch && i <= maxToWrite; i++ {
history.TargetToSource[i%wsPeriod] = params.BeaconConfig().FarFutureEpoch
}
history.LatestEpochWritten = targetEpoch
}
history.TargetToSource[targetEpoch%wsPeriod] = sourceEpoch
return history
}
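// For example, if LatestEpochWritten is 5 and an attestation with source 6 and
// target 8 is marked, epochs 6 and 7 are reset to FAR_FUTURE_EPOCH,
// TargetToSource[8] is set to 6, and LatestEpochWritten becomes 8.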
// safeTargetToSource makes sure the epoch accessed is within bounds; if it is not,
// it returns the "default" FAR_FUTURE_EPOCH value.
func safeTargetToSource(history *slashpb.AttestationHistory, targetEpoch uint64) uint64 {
wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
if targetEpoch > history.LatestEpochWritten || int(targetEpoch) < int(history.LatestEpochWritten)-int(wsPeriod) {
return params.BeaconConfig().FarFutureEpoch
}
return history.TargetToSource[targetEpoch%wsPeriod]
}
// waitToSlotOneThird waits until one-third of the way through the current slot
// so that the beacon node's head block has time to update.
func (v *validator) waitToSlotOneThird(ctx context.Context, slot uint64) {
_, span := trace.StartSpan(ctx, "validator.waitToSlotOneThird")
defer span.End()
delay := slotutil.DivideSlotBy(3 /* a third of the slot duration */)
startTime := slotutil.SlotStartTime(v.genesisTime, slot)
finalTime := startTime.Add(delay)
time.Sleep(roughtime.Until(finalTime))
}

View File

@@ -0,0 +1,562 @@
package streaming
import (
"context"
"errors"
"reflect"
"sync"
"testing"
"time"
"github.com/golang/mock/gomock"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
logTest "github.com/sirupsen/logrus/hooks/test"
"gopkg.in/d4l3k/messagediff.v1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
slashpb "github.com/prysmaticlabs/prysm/proto/slashing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestRequestAttestation_ValidatorDutiesRequestFailure(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, finish := setup(t)
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{}
defer finish()
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Could not fetch validator assignment")
}
func TestAttestToBlockHead_SubmitAttestation_EmptyCommittee(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, finish := setup(t)
defer finish()
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 0,
Committee: make([]uint64, 0),
ValidatorIndex: 0,
},
}
validator.SubmitAttestation(context.Background(), 0, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Empty committee")
}
func TestAttestToBlockHead_SubmitAttestation_RequestFailure(t *testing.T) {
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: make([]uint64, 111),
ValidatorIndex: 0,
},
}
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Return(&ethpb.AttestationData{
BeaconBlockRoot: []byte{},
Target: &ethpb.Checkpoint{},
Source: &ethpb.Checkpoint{},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Return(nil, errors.New("something went wrong"))
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Could not submit attestation to beacon node")
}
func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) {
config := &featureconfig.Flags{
ProtectAttester: true,
}
reset := featureconfig.InitWithReset(config)
defer reset()
validator, m, finish := setup(t)
defer finish()
hook := logTest.NewGlobal()
validatorIndex := uint64(7)
committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: committee,
ValidatorIndex: validatorIndex,
},
}
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Return(&ethpb.AttestationData{
BeaconBlockRoot: beaconBlockRoot[:],
Target: &ethpb.Checkpoint{Root: targetRoot[:]},
Source: &ethpb.Checkpoint{Root: sourceRoot[:], Epoch: 3},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{SignatureDomain: []byte{}}, nil /*err*/)
var generatedAttestation *ethpb.Attestation
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Do(func(_ context.Context, att *ethpb.Attestation) {
generatedAttestation = att
}).Return(&ethpb.AttestResponse{}, nil /* error */)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
aggregationBitfield := bitfield.NewBitlist(uint64(len(committee)))
aggregationBitfield.SetBitAt(4, true)
expectedAttestation := &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: beaconBlockRoot[:],
Target: &ethpb.Checkpoint{Root: targetRoot[:]},
Source: &ethpb.Checkpoint{Root: sourceRoot[:], Epoch: 3},
},
AggregationBits: aggregationBitfield,
}
root, err := helpers.ComputeSigningRoot(expectedAttestation.Data, []byte{})
if err != nil {
t.Fatal(err)
}
sig, err := validator.keyManager.Sign(validatorPubKey, root)
if err != nil {
t.Fatal(err)
}
expectedAttestation.Signature = sig.Marshal()
if !reflect.DeepEqual(generatedAttestation, expectedAttestation) {
t.Errorf("Incorrectly attested head, wanted %v, received %v", expectedAttestation, generatedAttestation)
diff, _ := messagediff.PrettyDiff(expectedAttestation, generatedAttestation)
t.Log(diff)
}
testutil.AssertLogsDoNotContain(t, hook, "Could not")
}
func TestAttestToBlockHead_BlocksDoubleAtt(t *testing.T) {
config := &featureconfig.Flags{
ProtectAttester: true,
}
reset := featureconfig.InitWithReset(config)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
validatorIndex := uint64(7)
committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: committee,
ValidatorIndex: validatorIndex,
},
}
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Times(2).Return(&ethpb.AttestationData{
BeaconBlockRoot: beaconBlockRoot[:],
Target: &ethpb.Checkpoint{Root: targetRoot[:], Epoch: 4},
Source: &ethpb.Checkpoint{Root: sourceRoot[:], Epoch: 3},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Return(&ethpb.AttestResponse{}, nil /* error */)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected")
}
func TestAttestToBlockHead_BlocksSurroundAtt(t *testing.T) {
config := &featureconfig.Flags{
ProtectAttester: true,
}
reset := featureconfig.InitWithReset(config)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
validatorIndex := uint64(7)
committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: committee,
ValidatorIndex: validatorIndex,
},
}
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Times(2).Return(&ethpb.AttestationData{
BeaconBlockRoot: beaconBlockRoot[:],
Target: &ethpb.Checkpoint{Root: targetRoot[:], Epoch: 2},
Source: &ethpb.Checkpoint{Root: sourceRoot[:], Epoch: 1},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Return(&ethpb.AttestResponse{}, nil /* error */)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected")
}
func TestAttestToBlockHead_BlocksSurroundedAtt(t *testing.T) {
config := &featureconfig.Flags{
ProtectAttester: true,
}
reset := featureconfig.InitWithReset(config)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
validatorIndex := uint64(7)
committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: committee,
ValidatorIndex: validatorIndex,
},
}
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Return(&ethpb.AttestationData{
BeaconBlockRoot: beaconBlockRoot[:],
Target: &ethpb.Checkpoint{Root: targetRoot[:], Epoch: 3},
Source: &ethpb.Checkpoint{Root: sourceRoot[:], Epoch: 0},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Return(&ethpb.AttestResponse{}, nil /* error */)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Return(&ethpb.AttestationData{
BeaconBlockRoot: []byte("A"),
Target: &ethpb.Checkpoint{Root: []byte("B"), Epoch: 2},
Source: &ethpb.Checkpoint{Root: []byte("C"), Epoch: 1},
}, nil)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Attempted to make a slashable attestation, rejected")
}
func TestAttestToBlockHead_DoesNotAttestBeforeDelay(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
validator.genesisTime = uint64(roughtime.Now().Unix())
m.validatorClient.EXPECT().GetDuties(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.DutiesRequest{}),
gomock.Any(),
).Times(0)
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Times(0)
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Return(&ethpb.AttestResponse{}, nil /* error */).Times(0)
timer := time.NewTimer(1 * time.Second)
go validator.SubmitAttestation(context.Background(), 0, validatorPubKey)
<-timer.C
}
func TestAttestToBlockHead_DoesAttestAfterDelay(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
var wg sync.WaitGroup
wg.Add(1)
defer wg.Wait()
validator.genesisTime = uint64(roughtime.Now().Unix())
validatorIndex := uint64(5)
committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: committee,
ValidatorIndex: validatorIndex,
},
}
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Return(&ethpb.AttestationData{
BeaconBlockRoot: []byte("A"),
Target: &ethpb.Checkpoint{Root: []byte("B")},
Source: &ethpb.Checkpoint{Root: []byte("C"), Epoch: 3},
}, nil).Do(func(arg0, arg1 interface{}) {
wg.Done()
})
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.Any(),
).Return(&ethpb.AttestResponse{}, nil).Times(1)
validator.SubmitAttestation(context.Background(), 0, validatorPubKey)
}
func TestAttestToBlockHead_CorrectBitfieldLength(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
validatorIndex := uint64(2)
committee := []uint64{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
validator.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
PublicKey: validatorKey.PublicKey.Marshal(),
CommitteeIndex: 5,
Committee: committee,
ValidatorIndex: validatorIndex,
},
}
m.validatorClient.EXPECT().GetAttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
).Return(&ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: []byte("B")},
Source: &ethpb.Checkpoint{Root: []byte("C"), Epoch: 3},
}, nil)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
var generatedAttestation *ethpb.Attestation
m.validatorClient.EXPECT().ProposeAttestation(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.Attestation{}),
).Do(func(_ context.Context, att *ethpb.Attestation) {
generatedAttestation = att
}).Return(&ethpb.AttestResponse{}, nil /* error */)
validator.SubmitAttestation(context.Background(), 30, validatorPubKey)
if len(generatedAttestation.AggregationBits) != 2 {
t.Errorf("Wanted length %d, received %d", 2, len(generatedAttestation.AggregationBits))
}
}
func TestAttestationHistory_BlocksDoubleAttestation(t *testing.T) {
newMap := make(map[uint64]uint64)
newMap[0] = params.BeaconConfig().FarFutureEpoch
attestations := &slashpb.AttestationHistory{
TargetToSource: newMap,
LatestEpochWritten: 0,
}
// Mark an attestation spanning epochs 0 to 3.
newAttSource := uint64(0)
newAttTarget := uint64(3)
attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget)
if attestations.LatestEpochWritten != newAttTarget {
t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten)
}
// Try an attestation that should be slashable (double att) spanning epochs 1 to 3.
newAttSource = uint64(1)
newAttTarget = uint64(3)
if !isNewAttSlashable(attestations, newAttSource, newAttTarget) {
t.Fatalf("Expected attestation of source %d and target %d to be considered slashable", newAttSource, newAttTarget)
}
}
func TestAttestationHistory_Prunes(t *testing.T) {
wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
newMap := make(map[uint64]uint64)
newMap[0] = params.BeaconConfig().FarFutureEpoch
attestations := &slashpb.AttestationHistory{
TargetToSource: newMap,
LatestEpochWritten: 0,
}
// Try an attestation on totally unmarked history, should not be slashable.
if isNewAttSlashable(attestations, 0, wsPeriod+5) {
t.Fatalf("Expected attestation of source 0, target %d to be considered slashable", wsPeriod+5)
}
// Mark attestations spanning epochs 0 to 3 and 6 to 9.
prunedNewAttSource := uint64(0)
prunedNewAttTarget := uint64(3)
attestations = markAttestationForTargetEpoch(attestations, prunedNewAttSource, prunedNewAttTarget)
newAttSource := prunedNewAttSource + 6
newAttTarget := prunedNewAttTarget + 6
attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget)
if attestations.LatestEpochWritten != newAttTarget {
t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten)
}
// Mark an attestation spanning epochs wsPeriod+6 to wsPeriod+9.
farNewAttSource := newAttSource + wsPeriod
farNewAttTarget := newAttTarget + wsPeriod
attestations = markAttestationForTargetEpoch(attestations, farNewAttSource, farNewAttTarget)
if attestations.LatestEpochWritten != farNewAttTarget {
t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten)
}
if safeTargetToSource(attestations, prunedNewAttTarget) != params.BeaconConfig().FarFutureEpoch {
t.Fatalf("Expected attestation at target epoch %d to not be marked", prunedNewAttTarget)
}
if safeTargetToSource(attestations, farNewAttTarget) != farNewAttSource {
t.Fatalf("Expected attestation at target epoch %d to not be marked", farNewAttSource)
}
// Try an attestation from existing source to outside prune, should slash.
if !isNewAttSlashable(attestations, newAttSource, farNewAttTarget) {
t.Fatalf("Expected attestation of source %d, target %d to be considered slashable", newAttSource, farNewAttTarget)
}
// Try an attestation from before existing target to outside prune, should slash.
if !isNewAttSlashable(attestations, newAttTarget-1, farNewAttTarget) {
t.Fatalf("Expected attestation of source %d, target %d to be considered slashable", newAttTarget-1, farNewAttTarget)
}
// Try an attestation larger than pruning amount, should slash.
if !isNewAttSlashable(attestations, 0, farNewAttTarget+5) {
t.Fatalf("Expected attestation of source 0, target %d to be considered slashable", farNewAttTarget+5)
}
}
func TestAttestationHistory_BlocksSurroundedAttestation(t *testing.T) {
newMap := make(map[uint64]uint64)
newMap[0] = params.BeaconConfig().FarFutureEpoch
attestations := &slashpb.AttestationHistory{
TargetToSource: newMap,
LatestEpochWritten: 0,
}
// Mark an attestation spanning epochs 0 to 3.
newAttSource := uint64(0)
newAttTarget := uint64(3)
attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget)
if attestations.LatestEpochWritten != newAttTarget {
t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten)
}
// Try an attestation that should be slashable (being surrounded) spanning epochs 1 to 2.
newAttSource = uint64(1)
newAttTarget = uint64(2)
if !isNewAttSlashable(attestations, newAttSource, newAttTarget) {
t.Fatalf("Expected attestation of source %d and target %d to be considered slashable", newAttSource, newAttTarget)
}
}
func TestAttestationHistory_BlocksSurroundingAttestation(t *testing.T) {
newMap := make(map[uint64]uint64)
newMap[0] = params.BeaconConfig().FarFutureEpoch
attestations := &slashpb.AttestationHistory{
TargetToSource: newMap,
LatestEpochWritten: 0,
}
// Mark an attestation spanning epochs 1 to 2.
newAttSource := uint64(1)
newAttTarget := uint64(2)
attestations = markAttestationForTargetEpoch(attestations, newAttSource, newAttTarget)
if attestations.LatestEpochWritten != newAttTarget {
t.Fatalf("Expected latest epoch written to be %d, received %d", newAttTarget, attestations.LatestEpochWritten)
}
if attestations.TargetToSource[newAttTarget] != newAttSource {
t.Fatalf("Expected source epoch to be %d, received %d", newAttSource, attestations.TargetToSource[newAttTarget])
}
// Try an attestation that should be slashable (surrounding) spanning epochs 0 to 3.
newAttSource = uint64(0)
newAttTarget = uint64(3)
if !isNewAttSlashable(attestations, newAttSource, newAttTarget) {
t.Fatalf("Expected attestation of source %d and target %d to be considered slashable", newAttSource, newAttTarget)
}
}
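The four tests above reduce to two slashing conditions over the target-to-source history. A minimal sketch of the predicate they exercise, assuming only the in-memory map shown in the tests (this is not the package's actual isNewAttSlashable, which additionally handles pruning and the weak subjectivity window):
// Minimal sketch of the double-vote and surround-vote checks only; the real
// isNewAttSlashable also prunes history older than the weak subjectivity period.
func isSlashableSketch(targetToSource map[uint64]uint64, source, target uint64) bool {
	farFuture := params.BeaconConfig().FarFutureEpoch
	// Double vote: a different attestation already exists for this target epoch.
	if existing, ok := targetToSource[target]; ok && existing != farFuture && existing != source {
		return true
	}
	for existingTarget, existingSource := range targetToSource {
		if existingSource == farFuture {
			continue
		}
		// Surrounding vote: the new attestation spans an existing one.
		if source < existingSource && existingTarget < target {
			return true
		}
		// Surrounded vote: an existing attestation spans the new one.
		if existingSource < source && target < existingTarget {
			return true
		}
	}
	return false
}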

View File

@@ -0,0 +1,205 @@
package streaming
import (
"bytes"
"context"
"fmt"
"io"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// StreamDuties consumes a server-side stream of validator duties from a beacon node
// for a set of validating keys passed in as a request type. New duties will be
// sent over the stream when a new epoch is reached or when a chain reorg happens
// across epochs in the beacon node.
func (v *validator) StreamDuties(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.StreamDuties")
defer span.End()
validatingKeys, err := v.keyManager.FetchValidatingKeys()
if err != nil {
return err
}
numValidatingKeys := len(validatingKeys)
req := &ethpb.DutiesRequest{
PublicKeys: bytesutil.FromBytes48Array(validatingKeys),
}
stream, err := v.validatorClient.StreamDuties(ctx, req)
if err != nil {
return errors.Wrap(err, "Could not setup validator duties streaming client")
}
for {
res, err := stream.Recv()
// If the stream is closed, we stop the loop.
if err == io.EOF {
break
}
// If context is canceled we stop the loop.
if ctx.Err() == context.Canceled {
return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop")
}
if err != nil {
return errors.Wrap(err, "Could not receive duties from stream")
}
// Updates validator duties and requests the beacon node to subscribe
// to attestation subnets in advance.
v.updateDuties(ctx, res, numValidatingKeys)
if err := v.requestSubnetSubscriptions(ctx, res, numValidatingKeys); err != nil {
log.WithError(err).Error("Could not request beacon node to subscribe to subnets")
}
}
return nil
}
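A hedged usage sketch: a caller would typically drive StreamDuties in its own goroutine and restart on transient errors. The runner below, runDutiesLoop, is illustrative only and not part of this change:
// Illustrative only: run the duties stream until the parent context is
// canceled, restarting on transient stream errors.
func runDutiesLoop(ctx context.Context, v *validator) {
	for {
		if err := v.StreamDuties(ctx); err != nil {
			log.WithError(err).Error("Duties stream failed, restarting")
		}
		select {
		case <-ctx.Done():
			return
		default:
		}
	}
}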
// RolesAt returns the validator roles at the given slot. Returns nil if the
// validator is known to not have a role at the slot. Returns UNKNOWN if the
// validator assignments are unknown. Otherwise returns a valid validatorRole map.
func (v *validator) RolesAt(ctx context.Context, slot uint64) (map[[48]byte][]validatorRole, error) {
epoch := slot / params.BeaconConfig().SlotsPerEpoch
rolesAt := make(map[[48]byte][]validatorRole)
v.dutiesLock.RLock()
duty, ok := v.dutiesByEpoch[epoch]
if !ok {
v.dutiesLock.RUnlock()
log.Debugf("No assigned duties yet for epoch %d", epoch)
return rolesAt, nil
}
v.dutiesLock.RUnlock()
for _, dt := range duty {
var roles []validatorRole
if dt == nil {
continue
}
if len(dt.ProposerSlots) > 0 {
for _, proposerSlot := range dt.ProposerSlots {
if proposerSlot != 0 && proposerSlot == slot {
roles = append(roles, roleProposer)
break
}
}
}
if dt.AttesterSlot == slot {
roles = append(roles, roleAttester)
aggregator, err := v.isAggregator(ctx, dt.Committee, slot, bytesutil.ToBytes48(dt.PublicKey))
if err != nil {
return nil, errors.Wrap(err, "could not check if a validator is an aggregator")
}
if aggregator {
roles = append(roles, roleAggregator)
}
}
if len(roles) == 0 {
roles = append(roles, roleUnknown)
}
var pubKey [48]byte
copy(pubKey[:], dt.PublicKey)
rolesAt[pubKey] = roles
}
return rolesAt, nil
}
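As a usage sketch, the returned roles map drives per-key scheduling at each slot tick. The dispatcher below, handleSlot, is a hypothetical caller written to show how the roles are typically consumed:
// Illustrative only: dispatch validator work from the roles computed above.
func handleSlot(ctx context.Context, v *validator, slot uint64) {
	roles, err := v.RolesAt(ctx, slot)
	if err != nil {
		log.WithError(err).Error("Could not compute roles for slot")
		return
	}
	for pubKey, keyRoles := range roles {
		for _, role := range keyRoles {
			switch role {
			case roleAttester:
				go v.SubmitAttestation(ctx, slot, pubKey)
			case roleProposer:
				go v.ProposeBlock(ctx, slot, pubKey)
			}
		}
	}
}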
// updateDuties caches the received validator duties in-memory for the current
// and next epochs. The companion requestSubnetSubscriptions function then uses
// these duties to determine which validating keys were selected as attestation
// aggregators and to notify the beacon node which attestation p2p subnets it
// should subscribe to.
func (v *validator) updateDuties(ctx context.Context, dutiesResp *ethpb.DutiesResponse, numKeys int) {
ctx, span := trace.StartSpan(ctx, "validator.updateDuties")
defer span.End()
currentSlot := v.CurrentSlot()
currentEpoch := currentSlot / params.BeaconConfig().SlotsPerEpoch
v.dutiesLock.Lock()
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty, 2)
v.dutiesByEpoch[currentEpoch] = dutiesResp.CurrentEpochDuties
v.dutiesByEpoch[currentEpoch+1] = dutiesResp.NextEpochDuties
v.dutiesLock.Unlock()
v.logDuties(currentSlot, dutiesResp.CurrentEpochDuties)
v.logDuties(currentSlot+params.BeaconConfig().SlotsPerEpoch, dutiesResp.NextEpochDuties)
}
// duty returns the validator assignment for the given public key and epoch.
func (v *validator) duty(pubKey [48]byte, epoch uint64) (*ethpb.DutiesResponse_Duty, error) {
v.dutiesLock.RLock()
defer v.dutiesLock.RUnlock()
duty, ok := v.dutiesByEpoch[epoch]
if !ok {
return nil, fmt.Errorf("no duty found for epoch %d", epoch)
}
for _, d := range duty {
if bytes.Equal(pubKey[:], d.PublicKey) {
return d, nil
}
}
return nil, fmt.Errorf("pubkey %#x not in duties", bytesutil.Trunc(pubKey[:]))
}
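A short usage sketch, assuming the stream has already populated duties for the epoch in question; logDutyForSlot is a hypothetical helper name:
// Illustrative only: look up this client's cached assignment for a slot's epoch.
func logDutyForSlot(v *validator, slot uint64, pubKey [48]byte) {
	epoch := slot / params.BeaconConfig().SlotsPerEpoch
	d, err := v.duty(pubKey, epoch)
	if err != nil {
		log.WithError(err).Debug("No cached duty for this epoch yet")
		return
	}
	log.WithField("committeeIndex", d.CommitteeIndex).Debug("Found cached duty")
}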
func (v *validator) requestSubnetSubscriptions(ctx context.Context, dutiesResp *ethpb.DutiesResponse, numKeys int) error {
subscribeSlots := make([]uint64, 0, numKeys)
subscribeCommitteeIDs := make([]uint64, 0, numKeys)
subscribeIsAggregator := make([]bool, 0, numKeys)
alreadySubscribed := make(map[[64]byte]bool)
for _, duty := range dutiesResp.CurrentEpochDuties {
if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
attesterSlot := duty.AttesterSlot
committeeIndex := duty.CommitteeIndex
alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex)
if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
continue
}
aggregator, err := v.isAggregator(ctx, duty.Committee, attesterSlot, bytesutil.ToBytes48(duty.PublicKey))
if err != nil {
return errors.Wrap(err, "could not check if a validator is an aggregator")
}
if aggregator {
alreadySubscribed[alreadySubscribedKey] = true
}
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIDs = append(subscribeCommitteeIDs, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
}
}
for _, duty := range dutiesResp.NextEpochDuties {
if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
attesterSlot := duty.AttesterSlot
committeeIndex := duty.CommitteeIndex
alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex)
if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
continue
}
aggregator, err := v.isAggregator(ctx, duty.Committee, attesterSlot, bytesutil.ToBytes48(duty.PublicKey))
if err != nil {
return errors.Wrap(err, "could not check if a validator is an aggregator")
}
if aggregator {
alreadySubscribed[alreadySubscribedKey] = true
}
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIDs = append(subscribeCommitteeIDs, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
}
}
_, err := v.validatorClient.SubscribeCommitteeSubnets(ctx, &ethpb.CommitteeSubnetsSubscribeRequest{
Slots: subscribeSlots,
CommitteeIds: subscribeCommitteeIDs,
IsAggregator: subscribeIsAggregator,
})
return err
}
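The de-duplication key above pairs a slot with a committee index into a single [64]byte value. The helper validatorSubscribeKey is defined elsewhere in this package; the sketch below shows one plausible construction, assumed rather than copied from the actual implementation, and it assumes "encoding/binary" is imported:
// Hypothetical sketch of a (slot, committeeIndex) dedup key; the package's
// real validatorSubscribeKey may differ.
func exampleSubscribeKey(slot, committeeIndex uint64) [64]byte {
	var key [64]byte
	binary.LittleEndian.PutUint64(key[:8], slot)
	binary.LittleEndian.PutUint64(key[32:40], committeeIndex)
	return key
}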

View File

@@ -0,0 +1,137 @@
package streaming
import (
"context"
"errors"
"io"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/mock"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestStreamDuties_ReturnsError(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
CommitteeIndex: 1,
},
}
expected := errors.New("bad")
client.EXPECT().StreamDuties(
gomock.Any(),
gomock.Any(),
).Return(nil, expected)
if err := v.StreamDuties(context.Background()); !strings.Contains(err.Error(), "bad") {
t.Errorf("Bad error; want=%v got=%v", expected, err)
}
}
func TestStreamDuties_OK(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
resp := &ethpb.DutiesResponse{
CurrentEpochDuties: []*ethpb.DutiesResponse_Duty{
{
AttesterSlot: params.BeaconConfig().SlotsPerEpoch,
ValidatorIndex: 200,
CommitteeIndex: 100,
Committee: []uint64{0, 1, 2, 3},
PublicKey: []byte("testPubKey_1"),
ProposerSlots: []uint64{params.BeaconConfig().SlotsPerEpoch + 1},
},
{
AttesterSlot: params.BeaconConfig().SlotsPerEpoch,
ValidatorIndex: 201,
CommitteeIndex: 101,
Committee: []uint64{0, 1, 2, 3},
PublicKey: []byte("testPubKey_2"),
ProposerSlots: []uint64{params.BeaconConfig().SlotsPerEpoch + 2},
},
},
}
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
v.genesisTime = uint64(time.Now().Unix()) + 500
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
stream := mock.NewMockBeaconNodeValidator_StreamDutiesClient(ctrl)
client.EXPECT().StreamDuties(
gomock.Any(),
gomock.Any(),
).Return(stream, nil)
ctx := context.Background()
stream.EXPECT().Context().Return(ctx).AnyTimes()
stream.EXPECT().Recv().Return(
resp,
nil,
)
client.EXPECT().SubscribeCommitteeSubnets(
gomock.Any(),
gomock.Any(),
).Return(nil, nil)
stream.EXPECT().Recv().Return(
nil,
io.EOF,
)
if err := v.StreamDuties(ctx); err != nil {
t.Fatalf("Could not update assignments: %v", err)
}
if v.dutiesByEpoch[0][0].ProposerSlots[0] != params.BeaconConfig().SlotsPerEpoch+1 {
t.Errorf(
"Unexpected validator assignments. want=%v got=%v",
params.BeaconConfig().SlotsPerEpoch+1,
v.dutiesByEpoch[0][0].ProposerSlots[0],
)
}
if v.dutiesByEpoch[0][0].AttesterSlot != params.BeaconConfig().SlotsPerEpoch {
t.Errorf(
"Unexpected validator assignments. want=%v got=%v",
params.BeaconConfig().SlotsPerEpoch,
v.dutiesByEpoch[0][0].AttesterSlot,
)
}
if v.dutiesByEpoch[0][0].CommitteeIndex != resp.CurrentEpochDuties[0].CommitteeIndex {
t.Errorf(
"Unexpected validator assignments. want=%v got=%v",
resp.CurrentEpochDuties[0].CommitteeIndex,
v.dutiesByEpoch[0][0].CommitteeIndex,
)
}
if v.dutiesByEpoch[0][0].ValidatorIndex != resp.CurrentEpochDuties[0].ValidatorIndex {
t.Errorf(
"Unexpected validator assignments. want=%v got=%v",
resp.CurrentEpochDuties[0].ValidatorIndex,
v.dutiesByEpoch[0][0].ValidatorIndex,
)
}
if v.dutiesByEpoch[0][1].ValidatorIndex != resp.CurrentEpochDuties[1].ValidatorIndex {
t.Errorf(
"Unexpected validator assignments. want=%v got=%v",
resp.CurrentEpochDuties[1].ValidatorIndex,
v.dutiesByEpoch[0][1].ValidatorIndex,
)
}
}

View File

@@ -0,0 +1,37 @@
package streaming
import (
"fmt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
)
type attSubmitted struct {
data *ethpb.AttestationData
attesterIndices []uint64
aggregatorIndices []uint64
}
func (v *validator) LogAttestationsSubmitted() {
v.attLogsLock.Lock()
defer v.attLogsLock.Unlock()
for _, attLog := range v.attLogs {
log.WithFields(logrus.Fields{
"Slot": attLog.data.Slot,
"CommitteeIndex": attLog.data.CommitteeIndex,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(attLog.data.BeaconBlockRoot)),
"SourceEpoch": attLog.data.Source.Epoch,
"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(attLog.data.Source.Root)),
"TargetEpoch": attLog.data.Target.Epoch,
"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(attLog.data.Target.Root)),
"AttesterIndices": attLog.attesterIndices,
"AggregatorIndices": attLog.aggregatorIndices,
}).Info("Submitted new attestations")
}
v.attLogs = make(map[[32]byte]*attSubmitted)
}

View File

@@ -0,0 +1,109 @@
package streaming
import (
"context"
"fmt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/sirupsen/logrus"
)
// LogValidatorGainsAndLosses logs important metrics related to this validator client's
// responsibilities throughout the beacon chain's lifecycle. It logs absolute accrued rewards
// and penalties over time, percentage gain/loss, and gives the end user a better idea
// of how the validator performs with respect to the rest.
func (v *validator) LogValidatorGainsAndLosses(ctx context.Context, slot uint64) error {
if slot%params.BeaconConfig().SlotsPerEpoch != 0 || slot <= params.BeaconConfig().SlotsPerEpoch {
// Do nothing unless we are at the start of the epoch, and not in the first epoch.
return nil
}
if !v.logValidatorBalances {
return nil
}
pks, err := v.keyManager.FetchValidatingKeys()
if err != nil {
return err
}
pubKeys := bytesutil.FromBytes48Array(pks)
req := &ethpb.ValidatorPerformanceRequest{
PublicKeys: pubKeys,
}
resp, err := v.beaconClient.GetValidatorPerformance(ctx, req)
if err != nil {
return err
}
if v.emitAccountMetrics {
for _, missingPubKey := range resp.MissingValidators {
fmtKey := fmt.Sprintf("%#x", missingPubKey[:])
metrics.ValidatorBalancesGaugeVec.WithLabelValues(fmtKey).Set(0)
}
}
included := 0
votedSource := 0
votedTarget := 0
votedHead := 0
prevEpoch := uint64(0)
if slot >= params.BeaconConfig().SlotsPerEpoch {
prevEpoch = (slot / params.BeaconConfig().SlotsPerEpoch) - 1
}
gweiPerEth := float64(params.BeaconConfig().GweiPerEth)
for i, pubKey := range resp.PublicKeys {
pubKeyBytes := bytesutil.ToBytes48(pubKey)
if slot < params.BeaconConfig().SlotsPerEpoch {
v.prevBalance[pubKeyBytes] = params.BeaconConfig().MaxEffectiveBalance
}
truncatedKey := fmt.Sprintf("%#x", pubKey[:8])
if v.prevBalance[pubKeyBytes] > 0 {
newBalance := float64(resp.BalancesAfterEpochTransition[i]) / gweiPerEth
prevBalance := float64(resp.BalancesBeforeEpochTransition[i]) / gweiPerEth
percentNet := (newBalance - prevBalance) / prevBalance
log.WithFields(logrus.Fields{
"pubKey": truncatedKey,
"epoch": prevEpoch,
"correctlyVotedSource": resp.CorrectlyVotedSource[i],
"correctlyVotedTarget": resp.CorrectlyVotedTarget[i],
"correctlyVotedHead": resp.CorrectlyVotedHead[i],
"inclusionSlot": resp.InclusionSlots[i],
"inclusionDistance": resp.InclusionDistances[i],
"oldBalance": prevBalance,
"newBalance": newBalance,
"percentChange": fmt.Sprintf("%.5f%%", percentNet*100),
}).Info("Previous epoch voting summary")
if v.emitAccountMetrics {
metrics.ValidatorBalancesGaugeVec.WithLabelValues(truncatedKey).Set(newBalance)
}
}
if resp.InclusionSlots[i] != ^uint64(0) {
included++
}
if resp.CorrectlyVotedSource[i] {
votedSource++
}
if resp.CorrectlyVotedTarget[i] {
votedTarget++
}
if resp.CorrectlyVotedHead[i] {
votedHead++
}
v.prevBalance[pubKeyBytes] = resp.BalancesBeforeEpochTransition[i]
}
log.WithFields(logrus.Fields{
"epoch": prevEpoch,
"attestationInclusionPercentage": fmt.Sprintf("%.0f%%", (float64(included)/float64(len(resp.InclusionSlots)))*100),
"correctlyVotedSourcePercentage": fmt.Sprintf("%.0f%%", (float64(votedSource)/float64(len(resp.CorrectlyVotedSource)))*100),
"correctlyVotedTargetPercentage": fmt.Sprintf("%.0f%%", (float64(votedTarget)/float64(len(resp.CorrectlyVotedTarget)))*100),
"correctlyVotedHeadPercentage": fmt.Sprintf("%.0f%%", (float64(votedHead)/float64(len(resp.CorrectlyVotedHead)))*100),
}).Info("Previous epoch aggregated voting summary")
return nil
}

View File

@@ -0,0 +1,208 @@
package streaming
// Validator client proposer functions.
import (
"context"
"fmt"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/blockutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/validator/client/metrics"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// ProposeBlock proposes a new beacon block for a given slot. This method collects the
// previous beacon block, any pending deposits, and ETH1 data from the beacon
// chain node to construct the new block. The new block is then processed with
// the state root computation, and finally signed by the validator before being
// sent back to the beacon node for broadcasting.
func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]byte) {
if slot == 0 {
log.Debug("Assigned to genesis slot, skipping proposal")
return
}
ctx, span := trace.StartSpan(ctx, "validator.ProposeBlock")
defer span.End()
fmtKey := fmt.Sprintf("%#x", pubKey[:])
span.AddAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
log := log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:])))
// Sign the randao reveal; it is used to request a block from the beacon node
epoch := slot / params.BeaconConfig().SlotsPerEpoch
randaoReveal, err := v.signRandaoReveal(ctx, pubKey, epoch)
if err != nil {
log.WithError(err).Error("Failed to sign randao reveal")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
// Request block from beacon node
b, err := v.validatorClient.GetBlock(ctx, &ethpb.BlockRequest{
Slot: slot,
RandaoReveal: randaoReveal,
Graffiti: v.graffiti,
})
if err != nil {
log.WithField("blockSlot", slot).WithError(err).Error("Failed to request block from beacon node")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
var slotBits bitfield.Bitlist
if featureconfig.Get().ProtectProposer {
slotBits, err = v.db.ProposalHistoryForEpoch(ctx, pubKey[:], epoch)
if err != nil {
log.WithError(err).Error("Failed to get proposal history")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
// If the bit for the current slot is marked, do not propose.
if slotBits.BitAt(slot % params.BeaconConfig().SlotsPerEpoch) {
log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
}
// Sign returned block from beacon node
sig, err := v.signBlock(ctx, pubKey, epoch, b)
if err != nil {
log.WithError(err).Error("Failed to sign block")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
blk := &ethpb.SignedBeaconBlock{
Block: b,
Signature: sig,
}
if featureconfig.Get().SlasherProtection && v.protector != nil {
bh, err := blockutil.SignedBeaconBlockHeaderFromBlock(blk)
if err != nil {
log.WithError(err).Error("Failed to get block header from block")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if !v.protector.VerifyBlock(ctx, bh) {
log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected by external slasher")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVecSlasher.WithLabelValues(fmtKey).Inc()
}
return
}
}
// Propose and broadcast block via beacon node
blkResp, err := v.validatorClient.ProposeBlock(ctx, blk)
if err != nil {
log.WithError(err).Error("Failed to propose block")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
if featureconfig.Get().ProtectProposer {
slotBits.SetBitAt(slot%params.BeaconConfig().SlotsPerEpoch, true)
if err := v.db.SaveProposalHistoryForEpoch(ctx, pubKey[:], epoch, slotBits); err != nil {
log.WithError(err).Error("Failed to save updated proposal history")
if v.emitAccountMetrics {
metrics.ValidatorProposeFailVec.WithLabelValues(fmtKey).Inc()
}
return
}
}
if v.emitAccountMetrics {
metrics.ValidatorProposeSuccessVec.WithLabelValues(fmtKey).Inc()
}
span.AddAttributes(
trace.StringAttribute("blockRoot", fmt.Sprintf("%#x", blkResp.BlockRoot)),
trace.Int64Attribute("numDeposits", int64(len(b.Body.Deposits))),
trace.Int64Attribute("numAttestations", int64(len(b.Body.Attestations))),
)
blkRoot := fmt.Sprintf("%#x", bytesutil.Trunc(blkResp.BlockRoot))
log.WithFields(logrus.Fields{
"slot": b.Slot,
"blockRoot": blkRoot,
"numAttestations": len(b.Body.Attestations),
"numDeposits": len(b.Body.Deposits),
}).Info("Submitted new block")
}
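The ProtectProposer guard above stores one bit per slot within the epoch, which is why the tests later in this commit allow proposals at different slots of the same epoch but reject a repeat at the exact slot. A condensed sketch of that check-then-mark sequence; checkAndMarkProposal is a hypothetical helper name, and the db calls are the same ones used in ProposeBlock above:
// Condensed sketch of the local double-proposal guard: one bit per slot in
// the epoch's bitlist, checked before signing and set only after a
// successful broadcast.
func checkAndMarkProposal(ctx context.Context, v *validator, pubKey [48]byte, slot uint64) error {
	epoch := slot / params.BeaconConfig().SlotsPerEpoch
	bits, err := v.db.ProposalHistoryForEpoch(ctx, pubKey[:], epoch)
	if err != nil {
		return err
	}
	if bits.BitAt(slot % params.BeaconConfig().SlotsPerEpoch) {
		return errors.New("double proposal for this slot, refusing to sign")
	}
	// ... sign and broadcast the block here ...
	bits.SetBitAt(slot%params.BeaconConfig().SlotsPerEpoch, true)
	return v.db.SaveProposalHistoryForEpoch(ctx, pubKey[:], epoch, bits)
}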
// ProposeExit --
func (v *validator) ProposeExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
return errors.New("unimplemented")
}
// Sign randao reveal with randao domain and private key.
func (v *validator) signRandaoReveal(ctx context.Context, pubKey [48]byte, epoch uint64) ([]byte, error) {
domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainRandao[:])
if err != nil {
return nil, errors.Wrap(err, "could not get domain data")
}
randaoReveal, err := v.signObject(pubKey, epoch, domain.SignatureDomain)
if err != nil {
return nil, errors.Wrap(err, "could not sign reveal")
}
return randaoReveal.Marshal(), nil
}
// Sign block with proposer domain and private key.
func (v *validator) signBlock(ctx context.Context, pubKey [48]byte, epoch uint64, b *ethpb.BeaconBlock) ([]byte, error) {
domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainBeaconProposer[:])
if err != nil {
return nil, errors.Wrap(err, "could not get domain data")
}
var sig *bls.Signature
if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported {
bodyRoot, err := stateutil.BlockBodyRoot(b.Body)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root")
}
blockHeader := &ethpb.BeaconBlockHeader{
Slot: b.Slot,
ProposerIndex: b.ProposerIndex,
StateRoot: b.StateRoot,
ParentRoot: b.ParentRoot,
BodyRoot: bodyRoot[:],
}
sig, err = protectingKeymanager.SignProposal(pubKey, bytesutil.ToBytes32(domain.SignatureDomain), blockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not sign block proposal")
}
} else {
blockRoot, err := helpers.ComputeSigningRoot(b, domain.SignatureDomain)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root")
}
sig, err = v.keyManager.Sign(pubKey, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not sign block proposal")
}
}
return sig.Marshal(), nil
}

View File

@@ -0,0 +1,345 @@
package streaming
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
lru "github.com/hashicorp/golang-lru"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
logTest "github.com/sirupsen/logrus/hooks/test"
slashpb "github.com/prysmaticlabs/prysm/proto/slashing"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/mock"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/validator/db"
)
type mocks struct {
validatorClient *mock.MockBeaconNodeValidatorClient
}
func setup(t *testing.T) (*validator, *mocks, func()) {
valDB := db.SetupDB(t, [][48]byte{validatorPubKey})
ctrl := gomock.NewController(t)
m := &mocks{
validatorClient: mock.NewMockBeaconNodeValidatorClient(ctrl),
}
aggregatedSlotCommitteeIDCache, err := lru.New(int(params.BeaconConfig().MaxCommitteesPerSlot))
if err != nil {
t.Fatal(err)
}
cleanMap := make(map[uint64]uint64)
cleanMap[0] = params.BeaconConfig().FarFutureEpoch
clean := &slashpb.AttestationHistory{
TargetToSource: cleanMap,
}
attHistoryByPubKey := make(map[[48]byte]*slashpb.AttestationHistory)
attHistoryByPubKey[validatorPubKey] = clean
validator := &validator{
db: valDB,
validatorClient: m.validatorClient,
keyManager: testKeyManager,
graffiti: []byte{},
attLogs: make(map[[32]byte]*attSubmitted),
aggregatedSlotCommitteeIDCache: aggregatedSlotCommitteeIDCache,
attesterHistoryByPubKey: attHistoryByPubKey,
}
return validator, m, ctrl.Finish
}
func TestProposeBlock_DoesNotProposeGenesisBlock(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, finish := setup(t)
defer finish()
validator.ProposeBlock(context.Background(), 0, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Assigned to genesis slot, skipping proposal")
}
func TestProposeBlock_DomainDataFailed(t *testing.T) {
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(nil /*response*/, errors.New("uh oh"))
validator.ProposeBlock(context.Background(), 1, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Failed to sign randao reveal")
}
func TestProposeBlock_RequestBlockFailed(t *testing.T) {
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(), // block request
).Return(nil /*response*/, errors.New("uh oh"))
validator.ProposeBlock(context.Background(), 1, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Failed to request block from beacon node")
}
func TestProposeBlock_ProposeBlockFailed(t *testing.T) {
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).Return(nil /*response*/, errors.New("uh oh"))
validator.ProposeBlock(context.Background(), 1, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Failed to propose block")
}
func TestProposeBlock_BlocksDoubleProposal(t *testing.T) {
cfg := &featureconfig.Flags{
ProtectProposer: true,
}
reset := featureconfig.InitWithReset(cfg)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Times(2).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Times(2).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).Return(&ethpb.ProposeResponse{}, nil /*error*/)
slot := params.BeaconConfig().SlotsPerEpoch*5 + 2
validator.ProposeBlock(context.Background(), slot, validatorPubKey)
testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal")
validator.ProposeBlock(context.Background(), slot, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Tried to sign a double proposal")
}
func TestProposeBlock_BlocksDoubleProposal_After54KEpochs(t *testing.T) {
cfg := &featureconfig.Flags{
ProtectProposer: true,
}
reset := featureconfig.InitWithReset(cfg)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Times(2).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Times(2).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).Return(&ethpb.ProposeResponse{}, nil /*error*/)
farFuture := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch
validator.ProposeBlock(context.Background(), farFuture, validatorPubKey)
testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal")
validator.ProposeBlock(context.Background(), farFuture, validatorPubKey)
testutil.AssertLogsContain(t, hook, "Tried to sign a double proposal")
}
func TestProposeBlock_AllowsPastProposals(t *testing.T) {
cfg := &featureconfig.Flags{
ProtectProposer: true,
}
reset := featureconfig.InitWithReset(cfg)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Times(2).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Times(2).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Times(2).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).Times(2).Return(&ethpb.ProposeResponse{}, nil /*error*/)
farAhead := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch
validator.ProposeBlock(context.Background(), farAhead, validatorPubKey)
testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal")
past := (params.BeaconConfig().WeakSubjectivityPeriod - 400) * params.BeaconConfig().SlotsPerEpoch
validator.ProposeBlock(context.Background(), past, validatorPubKey)
testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal")
}
func TestProposeBlock_AllowsSameEpoch(t *testing.T) {
cfg := &featureconfig.Flags{
ProtectProposer: true,
}
reset := featureconfig.InitWithReset(cfg)
defer reset()
hook := logTest.NewGlobal()
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Times(2).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Times(2).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Times(2).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).Times(2).Return(&ethpb.ProposeResponse{}, nil /*error*/)
pubKey := validatorPubKey
farAhead := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch
validator.ProposeBlock(context.Background(), farAhead, pubKey)
testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal")
validator.ProposeBlock(context.Background(), farAhead-4, pubKey)
testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal")
}
func TestProposeBlock_BroadcastsBlock(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).Return(&ethpb.ProposeResponse{}, nil /*error*/)
validator.ProposeBlock(context.Background(), 1, validatorPubKey)
}
func TestProposeBlock_BroadcastsBlock_WithGraffiti(t *testing.T) {
validator, m, finish := setup(t)
defer finish()
validator.graffiti = []byte("12345678901234567890123456789012")
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().GetBlock(
gomock.Any(), // ctx
gomock.Any(),
).Return(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Graffiti: validator.graffiti}}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), //epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
var sentBlock *ethpb.SignedBeaconBlock
m.validatorClient.EXPECT().ProposeBlock(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.SignedBeaconBlock{}),
).DoAndReturn(func(ctx context.Context, block *ethpb.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
sentBlock = block
return &ethpb.ProposeResponse{}, nil
})
validator.ProposeBlock(context.Background(), 1, validatorPubKey)
if string(sentBlock.Block.Body.Graffiti) != string(validator.graffiti) {
t.Errorf("Block was broadcast with the wrong graffiti field, wanted \"%v\", got \"%v\"", string(validator.graffiti), string(sentBlock.Block.Body.Graffiti))
}
}

View File

@@ -0,0 +1,875 @@
package streaming
import (
"context"
"errors"
"io/ioutil"
"reflect"
"strings"
"testing"
"time"
ptypes "github.com/gogo/protobuf/types"
"github.com/golang/mock/gomock"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
slashpb "github.com/prysmaticlabs/prysm/proto/slashing"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/mock"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
db2 "github.com/prysmaticlabs/prysm/validator/db"
"github.com/prysmaticlabs/prysm/validator/keymanager"
)
func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
}
var _ = Validator(&validator{})
const cancelledCtx = "context has been canceled"
func publicKeys(km keymanager.KeyManager) [][]byte {
keys, err := km.FetchValidatingKeys()
if err != nil {
log.WithError(err).Debug("Cannot fetch validating keys")
}
res := make([][]byte, len(keys))
for i := range keys {
res[i] = keys[i][:]
}
return res
}
func generateMockStatusResponse(pubkeys [][]byte) *ethpb.ValidatorActivationResponse {
multipleStatus := make([]*ethpb.ValidatorActivationResponse_Status, len(pubkeys))
for i, key := range pubkeys {
multipleStatus[i] = &ethpb.ValidatorActivationResponse_Status{
PublicKey: key,
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
},
}
}
return &ethpb.ValidatorActivationResponse{Statuses: multipleStatus}
}
func TestWaitForChainStart_SetsChainStartGenesisTime(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
genesis := uint64(time.Unix(1, 0).Unix())
clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl)
client.EXPECT().WaitForChainStart(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
&ethpb.ChainStartResponse{
Started: true,
GenesisTime: genesis,
},
nil,
)
if err := v.WaitForChainStart(context.Background()); err != nil {
t.Fatal(err)
}
if v.genesisTime != genesis {
t.Errorf("Expected chain start time to equal %d, received %d", genesis, v.genesisTime)
}
if v.ticker == nil {
t.Error("Expected ticker to be set, received nil")
}
}
func TestWaitForChainStart_ContextCanceled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
genesis := uint64(time.Unix(0, 0).Unix())
clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl)
client.EXPECT().WaitForChainStart(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
&ethpb.ChainStartResponse{
Started: true,
GenesisTime: genesis,
},
nil,
)
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := v.WaitForChainStart(ctx)
want := cancelledCtx
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitForChainStart_StreamSetupFails(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl)
client.EXPECT().WaitForChainStart(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, errors.New("failed stream"))
err := v.WaitForChainStart(context.Background())
want := "could not setup beacon chain ChainStart streaming client"
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitForChainStart_ReceiveErrorFromStream(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForChainStartClient(ctrl)
client.EXPECT().WaitForChainStart(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
nil,
errors.New("fails"),
)
err := v.WaitForChainStart(context.Background())
want := "could not receive ChainStart from stream"
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitForSynced_SetsGenesisTime(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
genesis := uint64(time.Unix(1, 0).Unix())
clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl)
client.EXPECT().WaitForSynced(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
&ethpb.SyncedResponse{
Synced: true,
GenesisTime: genesis,
},
nil,
)
if err := v.WaitForSynced(context.Background()); err != nil {
t.Fatal(err)
}
if v.genesisTime != genesis {
t.Errorf("Expected chain start time to equal %d, received %d", genesis, v.genesisTime)
}
if v.ticker == nil {
t.Error("Expected ticker to be set, received nil")
}
}
func TestWaitForSynced_ContextCanceled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
genesis := uint64(time.Unix(0, 0).Unix())
clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl)
client.EXPECT().WaitForSynced(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
&ethpb.SyncedResponse{
Synced: true,
GenesisTime: genesis,
},
nil,
)
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := v.WaitForSynced(ctx)
want := cancelledCtx
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitForSynced_StreamSetupFails(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl)
client.EXPECT().WaitForSynced(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, errors.New("failed stream"))
err := v.WaitForSynced(context.Background())
want := "could not setup beacon chain Synced streaming client"
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitForSynced_ReceiveErrorFromStream(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForSyncedClient(ctrl)
client.EXPECT().WaitForSynced(
gomock.Any(),
&ptypes.Empty{},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
nil,
errors.New("fails"),
)
err := v.WaitForSynced(context.Background())
want := "could not receive Synced from stream"
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitActivation_ContextCanceled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl)
client.EXPECT().WaitForActivation(
gomock.Any(),
&ethpb.ValidatorActivationRequest{
PublicKeys: publicKeys(v.keyManager),
},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
&ethpb.ValidatorActivationResponse{},
nil,
)
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := v.WaitForActivation(ctx)
want := cancelledCtx
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitActivation_StreamSetupFails(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl)
client.EXPECT().WaitForActivation(
gomock.Any(),
&ethpb.ValidatorActivationRequest{
PublicKeys: publicKeys(v.keyManager),
},
).Return(clientStream, errors.New("failed stream"))
err := v.WaitForActivation(context.Background())
want := "could not setup validator WaitForActivation streaming client"
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitActivation_ReceiveErrorFromStream(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl)
client.EXPECT().WaitForActivation(
gomock.Any(),
&ethpb.ValidatorActivationRequest{
PublicKeys: publicKeys(v.keyManager),
},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
nil,
errors.New("fails"),
)
err := v.WaitForActivation(context.Background())
want := "could not receive validator activation from stream"
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitActivation_LogsActivationEpochOK(t *testing.T) {
hook := logTest.NewGlobal()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
genesisTime: 1,
}
resp := generateMockStatusResponse(publicKeys(v.keyManager))
resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE
clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl)
client.EXPECT().WaitForActivation(
gomock.Any(),
&ethpb.ValidatorActivationRequest{
PublicKeys: publicKeys(v.keyManager),
},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
resp,
nil,
)
if err := v.WaitForActivation(context.Background()); err != nil {
t.Errorf("Could not wait for activation: %v", err)
}
testutil.AssertLogsContain(t, hook, "Validator activated")
}
func TestWaitMultipleActivation_LogsActivationEpochOK(t *testing.T) {
hook := logTest.NewGlobal()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManagerThreeValidators,
validatorClient: client,
genesisTime: 1,
}
publicKeys := publicKeys(v.keyManager)
resp := generateMockStatusResponse(publicKeys)
resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE
resp.Statuses[1].Status.Status = ethpb.ValidatorStatus_ACTIVE
clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl)
client.EXPECT().WaitForActivation(
gomock.Any(),
&ethpb.ValidatorActivationRequest{
PublicKeys: publicKeys,
},
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
resp,
nil,
)
if err := v.WaitForActivation(context.Background()); err != nil {
t.Errorf("Could not wait for activation: %v", err)
}
testutil.AssertLogsContain(t, hook, "Validator activated")
}
func TestWaitActivation_NotAllValidatorsActivatedOK(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManagerThreeValidators,
validatorClient: client,
genesisTime: 1,
}
resp := generateMockStatusResponse(publicKeys(v.keyManager))
resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE
clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl)
client.EXPECT().WaitForActivation(
gomock.Any(),
gomock.Any(),
).Return(clientStream, nil)
clientStream.EXPECT().Recv().Return(
&ethpb.ValidatorActivationResponse{},
nil,
)
clientStream.EXPECT().Recv().Return(
resp,
nil,
)
if err := v.WaitForActivation(context.Background()); err != nil {
t.Errorf("Could not wait for activation: %v", err)
}
}
func TestWaitSync_ContextCanceled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
n := mock.NewMockNodeClient(ctrl)
v := validator{
node: n,
}
ctx, cancel := context.WithCancel(context.Background())
cancel()
n.EXPECT().GetSyncStatus(
gomock.Any(),
gomock.Any(),
).Return(&ethpb.SyncStatus{Syncing: true}, nil)
err := v.WaitForSync(ctx)
want := cancelledCtx
if !strings.Contains(err.Error(), want) {
t.Errorf("Expected %v, received %v", want, err)
}
}
func TestWaitSync_NotSyncing(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
n := mock.NewMockNodeClient(ctrl)
v := validator{
node: n,
}
n.EXPECT().GetSyncStatus(
gomock.Any(),
gomock.Any(),
).Return(&ethpb.SyncStatus{Syncing: false}, nil)
err := v.WaitForSync(context.Background())
if err != nil {
t.Fatal(err)
}
}
func TestWaitSync_Syncing(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
n := mock.NewMockNodeClient(ctrl)
v := validator{
node: n,
}
n.EXPECT().GetSyncStatus(
gomock.Any(),
gomock.Any(),
).Return(&ethpb.SyncStatus{Syncing: true}, nil)
n.EXPECT().GetSyncStatus(
gomock.Any(),
gomock.Any(),
).Return(&ethpb.SyncStatus{Syncing: false}, nil)
err := v.WaitForSync(context.Background())
if err != nil {
t.Fatal(err)
}
}
func TestUpdateProtections_OK(t *testing.T) {
pubKey1 := [48]byte{1}
pubKey2 := [48]byte{2}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
db := db2.SetupDB(t, [][48]byte{pubKey1, pubKey2})
newMap := make(map[uint64]uint64)
newMap[0] = params.BeaconConfig().FarFutureEpoch
newMap[1] = 0
newMap[2] = 1
history := &slashpb.AttestationHistory{
TargetToSource: newMap,
LatestEpochWritten: 2,
}
newMap2 := make(map[uint64]uint64)
newMap2[0] = params.BeaconConfig().FarFutureEpoch
newMap2[1] = params.BeaconConfig().FarFutureEpoch
newMap2[2] = params.BeaconConfig().FarFutureEpoch
newMap2[3] = 2
history2 := &slashpb.AttestationHistory{
TargetToSource: newMap,
LatestEpochWritten: 3,
}
histories := make(map[[48]byte]*slashpb.AttestationHistory)
histories[pubKey1] = history
histories[pubKey2] = history2
if err := db.SaveAttestationHistoryForPubKeys(context.Background(), histories); err != nil {
t.Fatal(err)
}
slot := params.BeaconConfig().SlotsPerEpoch
epoch := slot / params.BeaconConfig().SlotsPerEpoch
v := validator{
db: db,
keyManager: testKeyManager,
validatorClient: client,
}
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
v.dutiesByEpoch[epoch] = []*ethpb.DutiesResponse_Duty{
{
AttesterSlot: slot,
ValidatorIndex: 200,
CommitteeIndex: 100,
Committee: []uint64{0, 1, 2, 3},
PublicKey: pubKey1[:],
},
{
AttesterSlot: slot,
ValidatorIndex: 201,
CommitteeIndex: 100,
Committee: []uint64{0, 1, 2, 3},
PublicKey: pubKey2[:],
},
}
if err := v.UpdateProtections(context.Background(), slot); err != nil {
t.Fatalf("Could not update assignments: %v", err)
}
if !reflect.DeepEqual(v.attesterHistoryByPubKey[pubKey1], history) {
t.Fatalf("Expected retrieved history to be equal to %v, received %v", history, v.attesterHistoryByPubKey[pubKey1])
}
if !reflect.DeepEqual(v.attesterHistoryByPubKey[pubKey2], history2) {
t.Fatalf("Expected retrieved history to be equal to %v, received %v", history2, v.attesterHistoryByPubKey[pubKey2])
}
}
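// In the histories above, TargetToSource maps an attested target epoch to its
// source epoch, with FarFutureEpoch standing in for "no attestation recorded
// at this target". A lookup helper under that reading (sketch, name
// hypothetical):
func hasAttestedForTargetSketch(hist *slashpb.AttestationHistory, target uint64) bool {
	source, ok := hist.TargetToSource[target]
	return ok && source != params.BeaconConfig().FarFutureEpoch
}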
func TestSaveProtections_OK(t *testing.T) {
pubKey1 := [48]byte{1}
pubKey2 := [48]byte{2}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
db := db2.SetupDB(t, [][48]byte{pubKey1, pubKey2})
cleanHistories, err := db.AttestationHistoryForPubKeys(context.Background(), [][48]byte{pubKey1, pubKey2})
if err != nil {
t.Fatal(err)
}
v := validator{
db: db,
keyManager: testKeyManager,
validatorClient: client,
attesterHistoryByPubKey: cleanHistories,
}
history1 := cleanHistories[pubKey1]
history1 = markAttestationForTargetEpoch(history1, 0, 1)
history2 := cleanHistories[pubKey2]
history2 = markAttestationForTargetEpoch(history2, 2, 3)
cleanHistories[pubKey1] = history1
cleanHistories[pubKey2] = history2
v.attesterHistoryByPubKey = cleanHistories
if err := v.SaveProtections(context.Background()); err != nil {
t.Fatalf("Could not update assignments: %v", err)
}
savedHistories, err := db.AttestationHistoryForPubKeys(context.Background(), [][48]byte{pubKey1, pubKey2})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(savedHistories[pubKey1], history1) {
t.Fatalf("Expected saved history to be equal to %v, received %v", history1, savedHistories[pubKey1])
}
if !reflect.DeepEqual(savedHistories[pubKey2], history2) {
t.Fatalf("Expected saved history to be equal to %v, received %v", history2, savedHistories[pubKey2])
}
}
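// markAttestationForTargetEpoch is exercised above without being shown. A
// sketch of the assumed behavior, reading the calls above as (history, source,
// target): record the source epoch under the target epoch and advance
// LatestEpochWritten. Both the argument order and the update rule are
// assumptions.
func markAttestationSketch(hist *slashpb.AttestationHistory, source, target uint64) *slashpb.AttestationHistory {
	hist.TargetToSource[target] = source
	if target > hist.LatestEpochWritten {
		hist.LatestEpochWritten = target
	}
	return hist
}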
func TestRolesAt_OK(t *testing.T) {
v, m, finish := setup(t)
defer finish()
sks := make([]*bls.SecretKey, 4)
sks[0] = bls.RandKey()
sks[1] = bls.RandKey()
sks[2] = bls.RandKey()
sks[3] = bls.RandKey()
v.keyManager = keymanager.NewDirect(sks)
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
CommitteeIndex: 1,
AttesterSlot: 1,
PublicKey: sks[0].PublicKey().Marshal(),
},
{
CommitteeIndex: 2,
ProposerSlots: []uint64{1},
PublicKey: sks[1].PublicKey().Marshal(),
},
{
CommitteeIndex: 1,
AttesterSlot: 2,
PublicKey: sks[2].PublicKey().Marshal(),
},
{
CommitteeIndex: 2,
AttesterSlot: 1,
ProposerSlots: []uint64{1, 5},
PublicKey: sks[3].PublicKey().Marshal(),
},
}
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
roleMap, err := v.RolesAt(context.Background(), 1)
if err != nil {
t.Fatal(err)
}
if roleMap[bytesutil.ToBytes48(sks[0].PublicKey().Marshal())][0] != roleAttester {
t.Errorf("Unexpected validator role. want: roleAttester")
}
if roleMap[bytesutil.ToBytes48(sks[1].PublicKey().Marshal())][0] != roleProposer {
t.Errorf("Unexpected validator role. want: roleProposer")
}
if roleMap[bytesutil.ToBytes48(sks[2].PublicKey().Marshal())][0] != roleUnknown {
t.Errorf("Unexpected validator role. want: roleUnknown")
}
if roleMap[bytesutil.ToBytes48(sks[3].PublicKey().Marshal())][0] != roleProposer {
t.Errorf("Unexpected validator role. want: roleProposer")
}
if roleMap[bytesutil.ToBytes48(sks[3].PublicKey().Marshal())][1] != roleAttester {
t.Errorf("Unexpected validator role. want: roleAttester")
}
if roleMap[bytesutil.ToBytes48(sks[3].PublicKey().Marshal())][2] != roleAggregator {
t.Errorf("Unexpected validator role. want: roleAggregator")
}
}
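// Why TestRolesAt_OK expects DomainData exactly twice: the two keys with an
// attester duty at slot 1 (sks[0] and sks[3]) each trigger an aggregator
// check, which signs the slot and so fetches the slot-signature domain. The
// shape of that check is assumed here (helper names hypothetical):
//
//	slotSig, err := v.signSlot(ctx, pubKey, slot)            // uses DomainData
//	isAggregator := isAggregatorSketch(len(duty.Committee), slotSig)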
func TestRolesAt_DoesNotAssignProposer_Slot0(t *testing.T) {
v, m, finish := setup(t)
defer finish()
sks := make([]*bls.SecretKey, 3)
sks[0] = bls.RandKey()
sks[1] = bls.RandKey()
sks[2] = bls.RandKey()
v.keyManager = keymanager.NewDirect(sks)
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
CommitteeIndex: 1,
AttesterSlot: 0,
ProposerSlots: []uint64{0},
PublicKey: sks[0].PublicKey().Marshal(),
},
{
CommitteeIndex: 2,
AttesterSlot: 4,
ProposerSlots: nil,
PublicKey: sks[1].PublicKey().Marshal(),
},
{
CommitteeIndex: 1,
AttesterSlot: 3,
ProposerSlots: nil,
PublicKey: sks[2].PublicKey().Marshal(),
},
}
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
gomock.Any(), // epoch
).Return(&ethpb.DomainResponse{}, nil /*err*/)
roleMap, err := v.RolesAt(context.Background(), 0)
if err != nil {
t.Fatal(err)
}
if roleMap[bytesutil.ToBytes48(sks[0].PublicKey().Marshal())][0] != roleAttester {
t.Errorf("Unexpected validator role. want: roleAttester")
}
if roleMap[bytesutil.ToBytes48(sks[1].PublicKey().Marshal())][0] != roleUnknown {
t.Errorf("Unexpected validator role. want: roleUnknown")
}
if roleMap[bytesutil.ToBytes48(sks[2].PublicKey().Marshal())][0] != roleUnknown {
t.Errorf("Unexpected validator role. want: roleUnknown")
}
}
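// TestRolesAt_DoesNotAssignProposer_Slot0 pins down that a ProposerSlots entry
// of 0 is ignored: there is no proposal at the genesis slot. The assumed guard
// (sketch):
//
//	for _, proposerSlot := range duty.ProposerSlots {
//		if proposerSlot != 0 && proposerSlot == slot {
//			roles = append(roles, roleProposer)
//		}
//	}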
func TestCheckAndLogValidatorStatus_OK(t *testing.T) {
nonexistentIndex := ^uint64(0)
type statusTest struct {
name string
status *ethpb.ValidatorActivationResponse_Status
log string
active bool
}
pubKeys := [][]byte{
bytesutil.Uint64ToBytes(0),
bytesutil.Uint64ToBytes(1),
bytesutil.Uint64ToBytes(2),
bytesutil.Uint64ToBytes(3),
}
tests := []statusTest{
{
name: "UNKNOWN_STATUS, no deposit found yet",
status: &ethpb.ValidatorActivationResponse_Status{
PublicKey: pubKeys[0],
Index: nonexistentIndex,
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
},
},
log: "Waiting for deposit to be observed by beacon node",
},
{
name: "DEPOSITED, deposit found",
status: &ethpb.ValidatorActivationResponse_Status{
PublicKey: pubKeys[0],
Index: nonexistentIndex,
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_DEPOSITED,
DepositInclusionSlot: 50,
Eth1DepositBlockNumber: 400,
},
},
log: "Deposit for validator received but not processed into the beacon state\" eth1DepositBlockNumber=400 expectedInclusionSlot=50",
},
{
name: "DEPOSITED into state",
status: &ethpb.ValidatorActivationResponse_Status{
PublicKey: pubKeys[0],
Index: 30,
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_DEPOSITED,
PositionInActivationQueue: 30,
},
},
log: "Deposit processed, entering activation queue after finalization\" index=30 positionInActivationQueue=30",
},
{
name: "PENDING",
status: &ethpb.ValidatorActivationResponse_Status{
PublicKey: pubKeys[0],
Index: 50,
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_PENDING,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
PositionInActivationQueue: 6,
},
},
log: "Waiting to be assigned activation epoch\" index=50 positionInActivationQueue=6",
},
{
name: "PENDING",
status: &ethpb.ValidatorActivationResponse_Status{
PublicKey: pubKeys[0],
Index: 89,
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_PENDING,
ActivationEpoch: 60,
PositionInActivationQueue: 5,
},
},
log: "Waiting for activation\" activationEpoch=60 index=89",
},
{
name: "EXITED",
status: &ethpb.ValidatorActivationResponse_Status{
PublicKey: pubKeys[0],
Status: &ethpb.ValidatorStatusResponse{
Status: ethpb.ValidatorStatus_EXITED,
},
},
log: "Validator exited",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
hook := logTest.NewGlobal()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := mock.NewMockBeaconNodeValidatorClient(ctrl)
v := validator{
keyManager: testKeyManager,
validatorClient: client,
}
v.dutiesByEpoch = make(map[uint64][]*ethpb.DutiesResponse_Duty)
v.dutiesByEpoch[0] = []*ethpb.DutiesResponse_Duty{
{
CommitteeIndex: 1,
},
}
active := v.checkAndLogValidatorStatus([]*ethpb.ValidatorActivationResponse_Status{test.status})
if active != test.active {
t.Fatalf("expected key to be active, expected %t, received %t", test.active, active)
}
testutil.AssertLogsContain(t, hook, test.log)
})
}
}
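// A sketch (assumed shape, not the committed code) of the handler the table
// above drives: one log line per status, returning whether any tracked key is
// already ACTIVE. Every case in the table leaves `active` at its zero value
// because none of the listed statuses is ACTIVE.
func checkAndLogStatusesSketch(statuses []*ethpb.ValidatorActivationResponse_Status) bool {
	anyActive := false
	for _, s := range statuses {
		switch s.Status.Status {
		case ethpb.ValidatorStatus_ACTIVE:
			anyActive = true
		case ethpb.ValidatorStatus_UNKNOWN_STATUS:
			log.Info("Waiting for deposit to be observed by beacon node")
		case ethpb.ValidatorStatus_EXITED:
			log.Info("Validator exited")
		default:
			// DEPOSITED and PENDING log deposit inclusion, queue position,
			// or activation epoch, as asserted by the table above.
		}
	}
	return anyActive
}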


@@ -22,7 +22,7 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/version"
 	"github.com/prysmaticlabs/prysm/validator/accounts"
-	"github.com/prysmaticlabs/prysm/validator/client"
+	"github.com/prysmaticlabs/prysm/validator/client/streaming"
 	"github.com/prysmaticlabs/prysm/validator/flags"
 	"github.com/prysmaticlabs/prysm/validator/node"
 	"github.com/sirupsen/logrus"
@@ -182,7 +182,7 @@ contract in order to activate the validator client`,
 			ctx, cancel := context.WithTimeout(
 				context.Background(), 10*time.Second /* Cancel if cannot connect to beacon node in 10 seconds. */)
 			defer cancel()
-			dialOpts := client.ConstructDialOptions(
+			dialOpts := streaming.ConstructDialOptions(
 				cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
 				cliCtx.String(flags.CertFlag.Name),
 				strings.Split(cliCtx.String(flags.GrpcHeadersFlag.Name), ","),
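The dial options built here are presumably handed to grpc.DialContext when opening the beacon node connection, assuming streaming.ConstructDialOptions keeps the signature of the old client.ConstructDialOptions:

	conn, err := grpc.DialContext(ctx, endpoint, dialOpts...) // endpoint assumed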


@@ -27,7 +27,8 @@ go_library(
         "//shared/prometheus:go_default_library",
         "//shared/tracing:go_default_library",
         "//shared/version:go_default_library",
-        "//validator/client:go_default_library",
+        "//validator/client/polling:go_default_library",
+        "//validator/client/streaming:go_default_library",
         "//validator/db:go_default_library",
         "//validator/flags:go_default_library",
         "//validator/keymanager:go_default_library",


@@ -22,7 +22,8 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/prometheus"
 	"github.com/prysmaticlabs/prysm/shared/tracing"
 	"github.com/prysmaticlabs/prysm/shared/version"
-	"github.com/prysmaticlabs/prysm/validator/client"
+	"github.com/prysmaticlabs/prysm/validator/client/polling"
+	"github.com/prysmaticlabs/prysm/validator/client/streaming"
 	"github.com/prysmaticlabs/prysm/validator/db"
 	"github.com/prysmaticlabs/prysm/validator/flags"
 	"github.com/prysmaticlabs/prysm/validator/keymanager"
@@ -195,7 +196,27 @@ func (s *ValidatorClient) registerClientService(keyManager keymanager.KeyManager
 	if err := s.services.FetchService(&sp); err == nil {
 		protector = sp
 	}
-	v, err := client.NewValidatorService(context.Background(), &client.Config{
+	if featureconfig.Get().EnableStreamDuties {
+		v, err := streaming.NewValidatorService(context.Background(), &streaming.Config{
+			Endpoint:                   endpoint,
+			DataDir:                    dataDir,
+			KeyManager:                 keyManager,
+			LogValidatorBalances:       logValidatorBalances,
+			EmitAccountMetrics:         emitAccountMetrics,
+			CertFlag:                   cert,
+			GraffitiFlag:               graffiti,
+			GrpcMaxCallRecvMsgSizeFlag: maxCallRecvMsgSize,
+			GrpcRetriesFlag:            grpcRetries,
+			GrpcHeadersFlag:            s.cliCtx.String(flags.GrpcHeadersFlag.Name),
+			Protector:                  protector,
+		})
+		if err != nil {
+			return errors.Wrap(err, "could not initialize client service")
+		}
+		return s.services.RegisterService(v)
+	}
+	v, err := polling.NewValidatorService(context.Background(), &polling.Config{
 		Endpoint:   endpoint,
 		DataDir:    dataDir,
 		KeyManager: keyManager,