Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00

Compare commits: kintsugi-n...cp-sync-fe (79 commits)
| SHA1 |
|---|
| 41008f9d3d |
| b85859acec |
| 1523fbbd74 |
| 5edcfbd785 |
| d3c97da4e1 |
| 1d216a8737 |
| 790bf03123 |
| ab60b1c7b2 |
| 236a5c4167 |
| 5e2229ce9d |
| 6ffba5c769 |
| 3e61763bd7 |
| 23bdce2354 |
| d94bf32dcf |
| 7cbef104b0 |
| cd6d0d9cf1 |
| afbe02697d |
| 2c921ec628 |
| 0e72938914 |
| 71d55d1cff |
| d8aa0f8827 |
| 37bc407b56 |
| 5983d0a397 |
| 85faecf2ca |
| f42227aa04 |
| c9d5b4ba0e |
| fed004686b |
| 4ae7513835 |
| 1d53fd2fd3 |
| a2c1185032 |
| 448d62d6e3 |
| 4858de7875 |
| cd1e3f2b3e |
| 6f20d17d15 |
| 94fd99f5cd |
| d4a420ddfd |
| 838b19e985 |
| 788338a004 |
| 39c33b82ad |
| 905e0f4c1c |
| 0ade1f121d |
| ee52f8dff3 |
| 50159c2e48 |
| cae58bbbd8 |
| 9b37418761 |
| a78cdf86cc |
| 1c4ea75a18 |
| 6f4c80531c |
| e3246922eb |
| 5962363847 |
| 5e8cf9cd28 |
| b5ca09bce6 |
| 720ee3f2a4 |
| 3d139d35f6 |
| ea38969af2 |
| f753ce81cc |
| 9d678b0c47 |
| 4a4a7e97df |
| 652b1617ed |
| 42edc4f8dd |
| 70d5bc448f |
| d1159308c8 |
| e01298bd08 |
| 2bcb62db28 |
| 672fb72a7f |
| 7fbd5b06da |
| 0fb91437fc |
| 6e731bdedd |
| 40eb718ba2 |
| e1840f7523 |
| b07e1ba7a4 |
| 233171d17c |
| 2b0e132201 |
| ad9e5331f5 |
| 341a2f1ea3 |
| 7974fe01cd |
| ae56f643eb |
| 2ea09b621e |
| 40fedee137 |
@@ -5,21 +5,22 @@ Contact: mailto:security@prysmaticlabs.com
 Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
 Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
 Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
 Encryption: openpgp4fpr:79C59A585E3FD3AFFA00F5C22940A6479DA7C9EC
 Preferred-Languages: en
 Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt
 -----BEGIN PGP SIGNATURE-----

-iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl++klgACgkQcuM+TfGl
-A27rQw/6A29p1W20J0v+h218p8XWLSUpTIGLnZTxw6KqdyVXMzlsQK0YG4G2s2AB
-0LKh7Ae/Di5E0U+Z4AjUW5nc5eaCxK36GMscH9Ah0rgJwNYxEJw7/2o8ZqVT/Ip2
-+56rFihRqxFZfaCNKFVuZFaL9jKewV9FKYP38ID6/SnTcrOHiu2AoAlyZGmB03p+
-iT57SPRHatygeY4xb/gwcfREFWEv+VHGyBTv8A+6ABZDxyurboCFMERHzFICrbmk
-8UdHxxlWZDnHAbAUyAwpERC5znx6IHXQJwF8TMtu6XY6a6axT2XBOyJDF9/mZOz+
-kdkz6loX5uxaQBGLtTv6Kqf1yUGANOZ16VhHvWwL209LmHmigIVQ+qSM6c79PsW/
-vrsqdz3GBsiMC5Fq2vYgnbgzpfE8Atjn0y7E+j4R7IvwOAE/Ro/b++nqnc4YqhME
-P/yTcfGftaCrdSNnQCXeoV9JxpFM5Xy8KV3eexvNKbcgA/9DtgxL5i+s5ZJkUT9A
-+qJvoRrRyIym32ghkHgtFJKB3PLCdobeoOVRk6EnMo9zKSiSK2rZEJW8Ccbo515D
-W9qUOn3GF7lNVuUFAU/YKEdmDp/AVaViZ7vH+8aq0LC0HBkZ8XlzWnWoArS8sMhw
-fX0R9g/HMgrwNte/d0mwim5lJ2Plgv60Bh4grJqwZJeWbU0zi1U=
-=uW+X
+iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAmGOfiYACgkQcuM+TfGl
+A24YwRAAiQk3w6yzqSEggrOlNoNn04iu/rWZdn5ihkQgzACXy8XH2D1gdKLChE/X
+7e5bUtgE2aCuHryQjwoKxqZakviBJFstVmHgF64rXv2zKhpqA30Mj4fI+T3zn8I+
++FpFV0TTsxNLDx+AcR1eQ1nSayO7ImUDIfOQNDDnSZZy42Bc+F+QIGKB3aH/8bpG
+kT+bDTZrXvX+TE1gZTbAtZG8sH8g/zadoWEHIhfXUuYb0kTz+DRzAxoqU4j4Z4ee
+1zSfFAgfJwxJP4kWD7s4xkE1sBbCgGBeD6cW/C2lbcfIei+XSizLpHW3jD9dNqh4
+fLkmEspSa/LV/iXFq8nFzu/GLww4q+sQZDzzDKZyws54CrATinRitZMhzoIL0bTn
+yFZVOGHosFAMEVZ36dl1Aw2+B2W6tr2CVr9c5zfV+kup5/KZH1EmT5nYY/zFwfg2
+jYCFB5wmYeiyWZvuprgJXRArgVZLZaJxwWazlPVk4i/4vPvRgvfHqOwHCBe8DXy0
+VHPhpewwb/ECYek1KoaNQflgR8iH2GMHkC5RjhGDAt1S0AQDtite5m4ZYt1kvO9E
+k/znkv89dduhL9CKDvZvnI+DICwsTrf//4KJ8PM/qaPAJa4GvtiUU/eS/jKBivtv
+OP5dZQtX6KPc9ewqqZgn622uHSezoBidgeTkdZsJ6tw2eIu0lsY=
+=V7L0
 -----END PGP SIGNATURE-----
WORKSPACE (14 changed lines)
@@ -225,7 +225,7 @@ filegroup(
     url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )

-consensus_spec_version = "v1.1.3"
+consensus_spec_version = "v1.1.6"

 bls_test_version = "v0.1.1"

@@ -241,7 +241,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "e572f8c57e2dbbaeee056a600dc9d08396010dd5134a3a95e43c540470acf6f5",
+    sha256 = "58dbf798e86017b5561af38f2217b99e9fa5b6be0e928b4c73dad6040bb94d65",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -257,7 +257,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "7e2f62eaae9fd541690cc61d252556d0c5deb585ca1873aacbeb5b02d06f1362",
+    sha256 = "5be19f7fca9733686ca25dad5ae306327e98830ef6354549d1ddfc56c10e0e9a",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -273,7 +273,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "05cbb89810c8acd6c57c4773ddfd167305cd4539960e9b4d7b69e1a988b35ad2",
+    sha256 = "cc110528fcf7ede049e6a05788c77f4a865c3110b49508149d61bb2a992bb896",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )

@@ -288,7 +288,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "0cef67b08448f7eb43bf66c464451c9e7a4852df8ef90555cca6d440e3436882",
+    sha256 = "c318d7b909ab39db9cc861f645ddd364e7475a4a3425bb702ab407fad3807acd",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )

@@ -362,9 +362,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
+    sha256 = "0a3d94428ea28916276694c517b82b364122063fdbf924f54ee9ae0bc500289f",
     urls = [
-        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
+        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.1/prysm-web-ui.tar.gz",
     ],
 )
api/client/openapi/BUILD.bazel (new file, 13 lines)
@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["client.go"],
    importpath = "github.com/prysmaticlabs/prysm/api/client/openapi",
    visibility = ["//visibility:public"],
    deps = [
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
api/client/openapi/client.go (new file, 278 lines)
@@ -0,0 +1,278 @@
package openapi

import (
    "bytes"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "time"

    types "github.com/prysmaticlabs/eth2-types"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    log "github.com/sirupsen/logrus"
)

const GET_WEAK_SUBJECTIVITY_CHECKPOINT_EPOCH_PATH = "/eth/v1alpha1/beacon/weak_subjectivity_checkpoint_epoch"
const GET_WEAK_SUBJECTIVITY_CHECKPOINT_PATH = "/eth/v1alpha1/beacon/weak_subjectivity_checkpoint"
const GET_SIGNED_BLOCK_PATH = "/eth/v2/beacon/blocks"
const GET_STATE_PATH = "/eth/v2/debug/beacon/states"

// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)

// WithTimeout sets the .Timeout attribute of the wrapped http.Client
func WithTimeout(timeout time.Duration) ClientOpt {
    return func(c *Client) {
        c.c.Timeout = timeout
    }
}

// Client provides a collection of helper methods for calling the beacon node OpenAPI endpoints
type Client struct {
    c      *http.Client
    host   string
    scheme string
}

func (c *Client) urlForPath(methodPath string) *url.URL {
    u := &url.URL{
        Scheme: c.scheme,
        Host:   c.host,
    }
    u.Path = path.Join(u.Path, methodPath)
    return u
}

// NewClient constructs a new client with the provided options (e.g. WithTimeout).
// host is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
    host, err := validHostname(host)
    if err != nil {
        return nil, err
    }
    c := &Client{
        c:      &http.Client{},
        scheme: "http",
        host:   host,
    }
    for _, o := range opts {
        o(c)
    }
    return c, nil
}

func validHostname(h string) (string, error) {
    // try to parse as url (being permissive)
    u, err := url.Parse(h)
    if err == nil && u.Host != "" {
        return u.Host, nil
    }
    // try to parse as host:port
    host, port, err := net.SplitHostPort(h)
    if err != nil {
        return "", err
    }
    return fmt.Sprintf("%s:%s", host, port), nil
}

type checkpointEpochResponse struct {
    Epoch string
}

// GetWeakSubjectivityCheckpointEpoch retrieves the epoch for a finalized block within the weak subjectivity period.
// The main use case for this method is to find the slot that can be used to fetch a block within the subjectivity period,
// which can be used to sync (as an alternative to syncing from genesis).
func (c *Client) GetWeakSubjectivityCheckpointEpoch() (uint64, error) {
    u := c.urlForPath(GET_WEAK_SUBJECTIVITY_CHECKPOINT_EPOCH_PATH)
    r, err := c.c.Get(u.String())
    if err != nil {
        return 0, err
    }
    if r.StatusCode != http.StatusOK {
        return 0, non200Err(r)
    }
    jsonr := &checkpointEpochResponse{}
    err = json.NewDecoder(r.Body).Decode(jsonr)
    if err != nil {
        return 0, err
    }
    return strconv.ParseUint(jsonr.Epoch, 10, 64)
}

type wscResponse struct {
    BlockRoot string
    StateRoot string
    Epoch     string
}

// GetWeakSubjectivityCheckpoint calls an entirely different rpc method than GetWeakSubjectivityCheckpointEpoch.
// This endpoint is much slower, because it uses stategen to generate the BeaconState at the beginning of the
// weak subjectivity epoch. This also results in a different set of state+block roots, because this endpoint currently
// uses the state's latest_block_header for the block hash, while the checkpoint sync code assumes that the block
// is from the first slot in the epoch and the state is from the subsequent slot.
func (c *Client) GetWeakSubjectivityCheckpoint() (*ethpb.WeakSubjectivityCheckpoint, error) {
    u := c.urlForPath(GET_WEAK_SUBJECTIVITY_CHECKPOINT_PATH)
    r, err := c.c.Get(u.String())
    if err != nil {
        return nil, err
    }
    if r.StatusCode != http.StatusOK {
        return nil, non200Err(r)
    }
    v := &wscResponse{}
    b := bytes.NewBuffer(nil)
    bodyReader := io.TeeReader(r.Body, b)
    err = json.NewDecoder(bodyReader).Decode(v)
    if err != nil {
        return nil, err
    }
    epoch, err := strconv.ParseUint(v.Epoch, 10, 64)
    if err != nil {
        return nil, err
    }
    blockRoot, err := base64.StdEncoding.DecodeString(v.BlockRoot)
    if err != nil {
        return nil, err
    }
    stateRoot, err := base64.StdEncoding.DecodeString(v.StateRoot)
    if err != nil {
        return nil, err
    }
    return &ethpb.WeakSubjectivityCheckpoint{
        Epoch:     types.Epoch(epoch),
        BlockRoot: blockRoot,
        StateRoot: stateRoot,
    }, nil
}

// TODO: fix hardcoded pb type using sniff code
// GetBlockBySlot queries the beacon node API for the SignedBeaconBlockAltair for the given slot
func (c *Client) GetBlockBySlot(slot uint64) (*ethpb.SignedBeaconBlockAltair, error) {
    blockPath := path.Join(GET_SIGNED_BLOCK_PATH, strconv.FormatUint(slot, 10))
    u := c.urlForPath(blockPath)
    log.Printf("requesting %s", u.String())
    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Accept", "application/octet-stream")
    r, err := c.c.Do(req)
    if err != nil {
        return nil, err
    }
    if r.StatusCode != http.StatusOK {
        return nil, non200Err(r)
    }

    v := &ethpb.SignedBeaconBlockAltair{}
    b := new(bytes.Buffer)
    _, err = b.ReadFrom(r.Body)
    if err != nil {
        return nil, err
    }
    err = v.UnmarshalSSZ(b.Bytes())
    return v, err
}

// GetBlockByRoot retrieves a SignedBeaconBlockAltair with the given root via the beacon node API
func (c *Client) GetBlockByRoot(blockHex string) (*ethpb.SignedBeaconBlockAltair, error) {
    blockPath := path.Join(GET_SIGNED_BLOCK_PATH, blockHex)
    u := c.urlForPath(blockPath)
    log.Printf("requesting %s", u.String())
    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Accept", "application/octet-stream")
    r, err := c.c.Do(req)
    if err != nil {
        return nil, err
    }
    if r.StatusCode != http.StatusOK {
        return nil, non200Err(r)
    }

    v := &ethpb.SignedBeaconBlockAltair{}
    b := new(bytes.Buffer)
    _, err = b.ReadFrom(r.Body)
    if err != nil {
        return nil, err
    }
    err = v.UnmarshalSSZ(b.Bytes())
    return v, err
}

// GetStateByRoot retrieves a BeaconStateAltair with the given root via the beacon node API
func (c *Client) GetStateByRoot(stateHex string) (*ethpb.BeaconStateAltair, error) {
    statePath := path.Join(GET_STATE_PATH, stateHex)
    u := c.urlForPath(statePath)
    log.Printf("requesting %s", u.String())
    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Accept", "application/octet-stream")
    r, err := c.c.Do(req)
    if err != nil {
        return nil, err
    }
    if r.StatusCode != http.StatusOK {
        return nil, non200Err(r)
    }

    v := &ethpb.BeaconStateAltair{}
    b := new(bytes.Buffer)
    _, err = b.ReadFrom(r.Body)
    if err != nil {
        return nil, err
    }
    err = v.UnmarshalSSZ(b.Bytes())
    return v, err
}

// GetStateBySlot retrieves a BeaconStateAltair at the given slot via the beacon node API
func (c *Client) GetStateBySlot(slot uint64) (*ethpb.BeaconStateAltair, error) {
    statePath := path.Join(GET_STATE_PATH, strconv.FormatUint(slot, 10))
    u := c.urlForPath(statePath)
    log.Printf("requesting %s", u.String())
    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Accept", "application/octet-stream")
    r, err := c.c.Do(req)
    if err != nil {
        return nil, err
    }
    if r.StatusCode != http.StatusOK {
        return nil, non200Err(r)
    }

    v := &ethpb.BeaconStateAltair{}
    b := new(bytes.Buffer)
    _, err = b.ReadFrom(r.Body)
    if err != nil {
        return nil, err
    }
    err = v.UnmarshalSSZ(b.Bytes())
    return v, err
}

func non200Err(response *http.Response) error {
    bodyBytes, err := ioutil.ReadAll(response.Body)
    var body string
    if err != nil {
        body = "(Unable to read response body.)"
    } else {
        body = "response body:\n" + string(bodyBytes)
    }
    return fmt.Errorf("Got non-200 status code = %d requesting %s. %s", response.StatusCode, response.Request.URL, body)
}
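Taken together, client.go gives checkpoint sync a small typed surface over the beacon node HTTP API. A minimal sketch of driving it end to end; the localhost:3500 address and the mainnet figure of 32 slots per epoch are assumptions for illustration, not part of this diff:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/prysmaticlabs/prysm/api/client/openapi"
)

func main() {
    // NewClient accepts a bare host:port and assumes plain http.
    c, err := openapi.NewClient("localhost:3500", openapi.WithTimeout(30*time.Second))
    if err != nil {
        log.Fatal(err)
    }
    // The fast endpoint: just the weak subjectivity epoch.
    epoch, err := c.GetWeakSubjectivityCheckpointEpoch()
    if err != nil {
        log.Fatal(err)
    }
    // First slot of that epoch, assuming mainnet's 32 slots per epoch.
    slot := epoch * 32
    block, err := c.GetBlockBySlot(slot)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("weak subjectivity epoch %d, block at slot %d\n", epoch, block.Block.Slot)
}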
@@ -74,10 +74,22 @@ func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *h
 	return nil
 }

+type ClientOption func(*http.Client)
+
+func WithTimeout(t time.Duration) ClientOption {
+	return func(c *http.Client) {
+		c.Timeout = t
+	}
+}
+
 // ProxyRequest proxies the request to grpc-gateway.
-func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
+func ProxyRequest(req *http.Request, opts ...ClientOption) (*http.Response, ErrorJson) {
 	// We do not use http.DefaultClient because it does not have any timeout.
+	// TODO: think about exposing this as a flag, or based on endpoint
 	netClient := &http.Client{Timeout: time.Minute * 2}
+	for _, o := range opts {
+		o(netClient)
+	}
 	grpcResp, err := netClient.Do(req)
 	if err != nil {
 		return nil, InternalServerErrorWithMessage(err, "could not proxy request")
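The new ClientOption plumbing lets callers override the hardcoded two-minute client timeout per call rather than globally. A minimal sketch, assuming a *http.Request named req already in scope; the five-minute figure is illustrative only:

    // Stretch the proxy timeout for a known-slow endpoint.
    resp, errJson := ProxyRequest(req, WithTimeout(5*time.Minute))
    if errJson != nil {
        // surface the structured ErrorJson to the caller
        return
    }
    defer resp.Body.Close()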
@@ -18,6 +18,7 @@ go_library(
         "receive_attestation.go",
         "receive_block.go",
         "service.go",
+        "state_balance_cache.go",
         "weak_subjectivity_checks.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
@@ -95,6 +96,7 @@ go_test(
         "init_test.go",
         "log_test.go",
         "metrics_test.go",
+        "mock_test.go",
         "process_attestation_test.go",
         "process_block_test.go",
         "receive_attestation_test.go",
@@ -141,6 +143,7 @@ go_test(
         "chain_info_norace_test.go",
         "checktags_test.go",
         "init_test.go",
+        "mock_test.go",
         "receive_block_test.go",
         "service_norace_test.go",
     ],
@@ -56,8 +56,8 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
 	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
 	c := setupBeaconChain(t, beaconDB)
 	c.finalizedCheckpt = cp
-	c.genesisRoot = genesisRoot
-	assert.DeepEqual(t, c.genesisRoot[:], c.FinalizedCheckpt().Root)
+	c.originRoot = genesisRoot
+	assert.DeepEqual(t, c.originRoot[:], c.FinalizedCheckpt().Root)
 }

 func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
@@ -77,8 +77,8 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
 	genesisRoot := [32]byte{'B'}
 	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
 	c.justifiedCheckpt = cp
-	c.genesisRoot = genesisRoot
-	assert.DeepEqual(t, c.genesisRoot[:], c.CurrentJustifiedCheckpt().Root)
+	c.originRoot = genesisRoot
+	assert.DeepEqual(t, c.originRoot[:], c.CurrentJustifiedCheckpt().Root)
 }

 func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
@@ -98,8 +98,8 @@ func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
 	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
 	c := setupBeaconChain(t, beaconDB)
 	c.prevJustifiedCheckpt = cp
-	c.genesisRoot = genesisRoot
-	assert.DeepEqual(t, c.genesisRoot[:], c.PreviousJustifiedCheckpt().Root)
+	c.originRoot = genesisRoot
+	assert.DeepEqual(t, c.originRoot[:], c.PreviousJustifiedCheckpt().Root)
 }

 func TestHeadSlot_CanRetrieve(t *testing.T) {
@@ -10,7 +10,6 @@ import (
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/config/features"
@@ -42,19 +41,16 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
 	// ensure head gets its best justified info.
 	if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
 		s.justifiedCheckpt = s.bestJustifiedCheckpt
-		if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
-			return err
-		}
 	}

 	// Get head from the fork choice service.
 	f := s.finalizedCheckpt
 	j := s.justifiedCheckpt
-	// To get head before the first justified epoch, the fork choice will start with genesis root
+	// To get head before the first justified epoch, the fork choice will start with origin root
 	// instead of zero hashes.
 	headStartRoot := bytesutil.ToBytes32(j.Root)
 	if headStartRoot == params.BeaconConfig().ZeroHash {
-		headStartRoot = s.genesisRoot
+		headStartRoot = s.originRoot
 	}

 	// In order to process head, fork choice store requires justified info.
@@ -273,57 +269,6 @@ func (s *Service) hasHeadState() bool {
 	return s.head != nil && s.head.state != nil
 }

-// This caches justified state balances to be used for fork choice.
-func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
-	if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
-		return err
-	}
-
-	s.clearInitSyncBlocks()
-
-	var justifiedState state.BeaconState
-	var err error
-	if justifiedRoot == s.genesisRoot {
-		justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
-		if err != nil {
-			return err
-		}
-	} else {
-		justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
-		if err != nil {
-			return err
-		}
-	}
-	if justifiedState == nil || justifiedState.IsNil() {
-		return errors.New("justified state can't be nil")
-	}
-
-	epoch := time.CurrentEpoch(justifiedState)
-
-	justifiedBalances := make([]uint64, justifiedState.NumValidators())
-	if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
-		if helpers.IsActiveValidatorUsingTrie(val, epoch) {
-			justifiedBalances[idx] = val.EffectiveBalance()
-		} else {
-			justifiedBalances[idx] = 0
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	s.justifiedBalancesLock.Lock()
-	defer s.justifiedBalancesLock.Unlock()
-	s.justifiedBalances = justifiedBalances
-	return nil
-}
-
-func (s *Service) getJustifiedBalances() []uint64 {
-	s.justifiedBalancesLock.RLock()
-	defer s.justifiedBalancesLock.RUnlock()
-	return s.justifiedBalances
-}
-
 // Notifies a common event feed of a new chain head event. Called right after a new
 // chain head is determined, set, and saved to disk.
 func (s *Service) notifyNewHeadEvent(
@@ -332,8 +277,8 @@ func (s *Service) notifyNewHeadEvent(
 	newHeadStateRoot,
 	newHeadRoot []byte,
 ) error {
-	previousDutyDependentRoot := s.genesisRoot[:]
-	currentDutyDependentRoot := s.genesisRoot[:]
+	previousDutyDependentRoot := s.originRoot[:]
+	currentDutyDependentRoot := s.originRoot[:]

 	var previousDutyEpoch types.Epoch
 	currentDutyEpoch := slots.ToEpoch(newHeadSlot)
@@ -123,13 +123,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
 func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
 	beaconDB := testDB.SetupDB(t)
 	service := setupBeaconChain(t, beaconDB)
+	ctx := context.Background()

 	state, _ := util.DeterministicGenesisState(t, 100)
 	r := [32]byte{'a'}
 	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
 	require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
-	require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
-	require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
+	balances, err := service.justifiedBalances.get(ctx, r)
+	require.NoError(t, err)
+	require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
 }

 func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
@@ -156,7 +158,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
 		cfg: &config{
 			StateNotifier: notifier,
 		},
-		genesisRoot: [32]byte{1},
+		originRoot: [32]byte{1},
 	}
 	newHeadStateRoot := [32]byte{2}
 	newHeadRoot := [32]byte{3}
@@ -172,8 +174,8 @@ func Test_notifyNewHeadEvent(t *testing.T) {
 		Block:           newHeadRoot[:],
 		State:           newHeadStateRoot[:],
 		EpochTransition: false,
-		PreviousDutyDependentRoot: srv.genesisRoot[:],
-		CurrentDutyDependentRoot:  srv.genesisRoot[:],
+		PreviousDutyDependentRoot: srv.originRoot[:],
+		CurrentDutyDependentRoot:  srv.originRoot[:],
 	}
 	require.DeepSSZEqual(t, wanted, eventHead)
 })
@@ -185,7 +187,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
 		cfg: &config{
 			StateNotifier: notifier,
 		},
-		genesisRoot: genesisRoot,
+		originRoot: genesisRoot,
 	}
 	epoch1Start, err := slots.EpochStart(1)
 	require.NoError(t, err)
@@ -25,18 +25,18 @@ func TestService_TreeHandler(t *testing.T) {
 	headState, err := util.NewBeaconState()
 	require.NoError(t, err)
 	require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
-	cfg := &config{
-		BeaconDB: beaconDB,
-		ForkChoiceStore: protoarray.New(
-			0, // justifiedEpoch
-			0, // finalizedEpoch
-			[32]byte{'a'},
-		),
-		StateGen: stategen.New(beaconDB),
-	}
+	fcs := protoarray.New(
+		0, // justifiedEpoch
+		0, // finalizedEpoch
+		[32]byte{'a'},
+	)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(fcs),
+	}
-	s, err := NewService(ctx)
+	s, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	s.cfg = cfg
 	require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
 	require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
 	s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)
@@ -130,6 +130,14 @@ var (
 		Name: "sync_head_state_hit",
 		Help: "The number of sync head state requests that are present in the cache.",
 	})
+	stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "state_balance_cache_hit",
+		Help: "Count the number of state balance cache hits.",
+	})
+	stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "state_balance_cache_miss",
+		Help: "Count the number of state balance cache misses.",
+	})
 )

 // reportSlotMetrics reports slot related metrics.
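These two counters instrument the new stateBalanceCache that replaces cacheJustifiedStateBalances; callers now go through s.justifiedBalances.get, as the onBlock hunk below shows. state_balance_cache.go itself is not rendered in this compare view, so the sketch below is only one plausible read path consistent with the visible pieces: the stateGen field and the get signature come from the mock_test.go and head_test.go hunks, while the root/balances memoization and the mutex are assumptions.

// Sketch only: assumes the blockchain package's existing imports
// (sync, helpers, time, state) and the stateByRooter interface from
// the mock_test.go file added below.
type stateBalanceCacheSketch struct {
    sync.Mutex
    stateGen stateByRooter
    root     [32]byte
    balances []uint64
}

func (c *stateBalanceCacheSketch) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
    c.Lock()
    defer c.Unlock()
    if justifiedRoot == c.root && c.balances != nil {
        stateBalanceCacheHit.Inc() // served from the cached root
        return c.balances, nil
    }
    stateBalanceCacheMiss.Inc() // regenerate via stategen
    st, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
    if err != nil {
        return nil, err
    }
    // Same computation the removed cacheJustifiedStateBalances performed:
    // effective balances for validators active at the state's current epoch.
    balances := make([]uint64, st.NumValidators())
    epoch := time.CurrentEpoch(st)
    if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
        if helpers.IsActiveValidatorUsingTrie(val, epoch) {
            balances[idx] = val.EffectiveBalance()
        }
        return nil
    }); err != nil {
        return nil, err
    }
    c.root, c.balances = justifiedRoot, balances
    return balances, nil
}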
beacon-chain/blockchain/mock_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package blockchain

import (
    "context"
    "errors"
    "testing"

    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

func testServiceOptsWithDB(t *testing.T) []Option {
    beaconDB := testDB.SetupDB(t)
    fcs := protoarray.New(0, 0, [32]byte{'a'})
    return []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(fcs),
    }
}

// warning: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
    return []Option{
        withStateBalanceCache(satisfactoryStateBalanceCache()),
    }
}

type mockStateByRooter struct {
    state state.BeaconState
    err   error
}

var _ stateByRooter = &mockStateByRooter{}

func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
    return m.state, m.err
}

// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
    err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
    return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
}
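A short sketch of how a test would consume these helpers; the test name and body are illustrative only. testServiceOptsWithDB is for code paths that touch the database, testServiceOptsNoDB for paths that provably do not:

func TestIllustrative(t *testing.T) {
    ctx := context.Background()

    // Full wiring: real test DB, stategen, and fork choice store.
    withDB, err := NewService(ctx, testServiceOptsWithDB(t)...)
    require.NoError(t, err)
    _ = withDB

    // Lightweight wiring: any accidental balance-cache use errors loudly.
    noDB, err := NewService(ctx, testServiceOptsNoDB()...)
    require.NoError(t, err)
    _ = noDB
}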
@@ -11,6 +11,7 @@ import (
 	"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
 	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
 	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
+	"github.com/prysmaticlabs/prysm/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
 	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 )
@@ -128,3 +129,18 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
 		return nil
 	}
 }
+
+func withStateBalanceCache(c *stateBalanceCache) Option {
+	return func(s *Service) error {
+		s.justifiedBalances = c
+		return nil
+	}
+}
+
+// WithFinalizedStateAtStartUp to store finalized state at start up.
+func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
+	return func(s *Service) error {
+		s.cfg.FinalizedStateAtStartUp = st
+		return nil
+	}
+}
@@ -24,14 +24,13 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB:        beaconDB,
-		ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
-		StateGen:        stategen.New(beaconDB),
-	}
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
+		WithStateGen(stategen.New(beaconDB)),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	_, err = blockTree1(t, beaconDB, []byte{'g'})
 	require.NoError(t, err)
@@ -131,14 +130,14 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB:        beaconDB,
-		ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
-		StateGen:        stategen.New(beaconDB),
-	}
+	fcs := protoarray.New(0, 0, [32]byte{'a'})
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(fcs),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	genesisState, pks := util.DeterministicGenesisState(t, 64)
 	require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
 	require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -157,13 +156,12 @@ func TestStore_SaveCheckpointState(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB: beaconDB,
-		StateGen: stategen.New(beaconDB),
-	}
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	s, err := util.NewBeaconState()
 	require.NoError(t, err)
@@ -230,13 +228,12 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB: beaconDB,
-		StateGen: stategen.New(beaconDB),
-	}
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	epoch := types.Epoch(1)
 	baseState, _ := util.DeterministicGenesisState(t, 1)
@@ -268,12 +265,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) {

 func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsNoDB()
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
 	require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)}))
@@ -281,12 +276,10 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {

 func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsNoDB()
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
 	require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
@@ -294,12 +287,10 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {

 func TestAttEpoch_NotMatch(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsNoDB()
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
 	err = service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -308,12 +299,9 @@ func TestAttEpoch_NotMatch(t *testing.T) {

 func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	d := util.HydrateAttestationData(&ethpb.AttestationData{})
 	assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
@@ -321,12 +309,10 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {

 func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b := util.NewBeaconBlock()
 	b.Block.Slot = 2
@@ -340,12 +326,10 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {

 func TestVerifyBeaconBlock_OK(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b := util.NewBeaconBlock()
 	b.Block.Slot = 2
@@ -361,10 +345,14 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	fcs := protoarray.New(0, 0, [32]byte{'a'})
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(fcs),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b32 := util.NewBeaconBlock()
 	b32.Block.Slot = 32
@@ -387,12 +375,10 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {

 func TestVerifyFinalizedConsistency_OK(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b32 := util.NewBeaconBlock()
 	b32.Block.Slot = 32
@@ -415,12 +401,10 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {

 func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b32 := util.NewBeaconBlock()
 	b32.Block.Slot = 32
@@ -151,7 +151,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
 		s.finalizedCheckpt = postState.FinalizedCheckpoint()
 	}

-	if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
+	balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
+	if err != nil {
+		msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root)
+		return errors.Wrap(err, msg)
+	}
+	if err := s.updateHead(ctx, balances); err != nil {
 		log.WithError(err).Warn("Could not update head")
 	}

@@ -255,12 +260,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo

 	jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
 	fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
-	sigSet := &bls.SignatureSet{
+	sigSet := &bls.SignatureBatch{
 		Signatures: [][]byte{},
 		PublicKeys: []bls.PublicKey{},
 		Messages:   [][32]byte{},
 	}
-	var set *bls.SignatureSet
+	var set *bls.SignatureBatch
 	boundaries := make(map[[32]byte]state.BeaconState)
 	for i, b := range blks {
 		set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
@@ -135,7 +135,24 @@ func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
 // shouldUpdateCurrentJustified prevents bouncing attack, by only updating conflicting justified
 // checkpoints in the fork choice if in the early slots of the epoch.
 // Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
 // See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
+//
+// Spec code:
+// def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
+//    """
+//    To address the bouncing attack, only update conflicting justified
+//    checkpoints in the fork choice if in the early slots of the epoch.
+//    Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
+//
+//    See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
+//    """
+//    if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
+//        return True
+//
+//    justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
+//    if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
+//        return False
+//
+//    return True
 func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.shouldUpdateCurrentJustified")
 	defer span.End()
@@ -143,51 +160,20 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
 	if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
 		return true, nil
 	}
-	var newJustifiedBlockSigned block.SignedBeaconBlock
-	justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
-	var err error
-	if s.hasInitSyncBlock(justifiedRoot) {
-		newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
-	} else {
-		newJustifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, justifiedRoot)
-		if err != nil {
-			return false, err
-		}
-	}
-	if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.IsNil() || newJustifiedBlockSigned.Block().IsNil() {
-		return false, errors.New("nil new justified block")
-	}
-
-	newJustifiedBlock := newJustifiedBlockSigned.Block()
 	jSlot, err := slots.EpochStart(s.justifiedCheckpt.Epoch)
 	if err != nil {
 		return false, err
 	}
-	if newJustifiedBlock.Slot() <= jSlot {
-		return false, nil
-	}
-	var justifiedBlockSigned block.SignedBeaconBlock
-	cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
-	if s.hasInitSyncBlock(cachedJustifiedRoot) {
-		justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
-	} else {
-		justifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, cachedJustifiedRoot)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	if justifiedBlockSigned == nil || justifiedBlockSigned.IsNil() || justifiedBlockSigned.Block().IsNil() {
-		return false, errors.New("nil justified block")
-	}
-	justifiedBlock := justifiedBlockSigned.Block()
-	b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot())
+	justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
+	b, err := s.ancestor(ctx, justifiedRoot[:], jSlot)
 	if err != nil {
 		return false, err
 	}
 	if !bytes.Equal(b, s.justifiedCheckpt.Root) {
 		return false, nil
 	}

 	return true, nil
 }
@@ -207,9 +193,6 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
 	if canUpdate {
 		s.prevJustifiedCheckpt = s.justifiedCheckpt
 		s.justifiedCheckpt = cpt
-		if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
-			return err
-		}
 	}

 	return nil
@@ -220,12 +203,13 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
 // This method does not have defense against fork choice bouncing attack, which is why it's only recommended to be used during initial syncing.
 func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
 	s.prevJustifiedCheckpt = s.justifiedCheckpt
-	s.justifiedCheckpt = cp
-	if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
+
+	if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
 		return err
 	}
+	s.justifiedCheckpt = cp

-	return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
+	return nil
 }

 func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -344,7 +328,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
 	if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
 		if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
 			s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
-			return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
+			// we don't need to check if the previous justified checkpoint was an ancestor since the new
+			// finalized checkpoint is overriding it.
+			return nil
 		}

 		// Update justified if store justified is not in chain with finalized check point.
@@ -359,9 +345,6 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
 		}
 		if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
 			s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
-			if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
-				return err
-			}
 		}
 	}
 	return nil
@@ -456,7 +439,7 @@ func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
 // fork choice justification routine.
 func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
 	if root == params.BeaconConfig().ZeroHash {
-		return s.genesisRoot
+		return s.originRoot
 	}
 	return root
 }
@@ -35,16 +35,17 @@ import (

 func TestStore_OnBlock(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)
-
-	cfg := &config{
-		BeaconDB:        beaconDB,
-		StateGen:        stategen.New(beaconDB),
-		ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
-	}
-	service, err := NewService(ctx)
+	fcs := protoarray.New(0, 0, [32]byte{'a'})
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(fcs),
+	}
+
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	genesisStateRoot := [32]byte{}
 	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
 	assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
@@ -134,13 +135,12 @@ func TestStore_OnBlockBatch(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB: beaconDB,
-		StateGen: stategen.New(beaconDB),
-	}
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	genesisStateRoot := [32]byte{}
 	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -186,14 +186,12 @@ func TestStore_OnBlockBatch(t *testing.T) {

 func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
 	params.UseMinimalConfig()
 	defer params.UseMainnetConfig()

-	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	service.genesisTime = time.Now()

 	update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -221,14 +219,13 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {

 func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
 	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
 	params.UseMinimalConfig()
 	defer params.UseMainnetConfig()

-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
 	lastJustifiedBlk := util.NewBeaconBlock()
 	lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
 	lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -253,13 +250,12 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB: beaconDB,
-		StateGen: stategen.New(beaconDB),
-	}
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
 	require.NoError(t, err)
@@ -287,13 +283,12 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{
-		BeaconDB: beaconDB,
-		StateGen: stategen.New(beaconDB),
-	}
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
-	service, err := NewService(ctx)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	genesisStateRoot := [32]byte{}
 	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -325,10 +320,13 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
-	service, err := NewService(ctx)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	signedBlock := util.NewBeaconBlock()
 	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
@@ -359,10 +357,12 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
 	service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
@@ -398,10 +398,12 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
 	service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

@@ -440,10 +442,12 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB}
-	service, err := NewService(ctx)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
 	service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
 	// Set finalized epoch to 1.
 	service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
@@ -584,7 +588,8 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
 }
 func TestAncestorByDB_CtxErr(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)

 	cancel()
@@ -596,10 +601,14 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	fcs := protoarray.New(0, 0, [32]byte{'a'})
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(fcs),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b1 := util.NewBeaconBlock()
 	b1.Block.Slot = 1
@@ -640,10 +649,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {

 func TestAncestor_CanUseForkchoice(t *testing.T) {
 	ctx := context.Background()
-	cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	opts := testServiceOptsWithDB(t)
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b1 := util.NewBeaconBlock()
 	b1.Block.Slot = 1
@@ -680,10 +688,14 @@ func TestAncestor_CanUseDB(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)

-	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
-	service, err := NewService(ctx)
+	fcs := protoarray.New(0, 0, [32]byte{'a'})
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(fcs),
+	}
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg

 	b1 := util.NewBeaconBlock()
 	b1.Block.Slot = 1
@@ -718,14 +730,13 @@ func TestAncestor_CanUseDB(t *testing.T) {

 func TestEnsureRootNotZeroHashes(t *testing.T) {
 	ctx := context.Background()
-	cfg := &config{}
-	service, err := NewService(ctx)
+	opts := testServiceOptsNoDB()
+	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg = cfg
-	service.genesisRoot = [32]byte{'a'}
+	service.originRoot = [32]byte{'a'}

 	r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash)
-	assert.Equal(t, service.genesisRoot, r, "Did not get wanted justified root")
+	assert.Equal(t, service.originRoot, r, "Did not get wanted justified root")
 	root := [32]byte{'b'}
 	r = service.ensureRootNotZeros(root)
 	assert.Equal(t, root, r, "Did not get wanted justified root")
@@ -733,6 +744,12 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {

func TestFinalizedImpliesNewJustified(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	fcs := protoarray.New(0, 0, [32]byte{'a'})
	opts := []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),
		WithForkChoiceStore(fcs),
	}
	ctx := context.Background()
	type args struct {
		cachedCheckPoint *ethpb.Checkpoint
@@ -774,9 +791,8 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
	beaconState, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
	service, err := NewService(ctx)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
	service.justifiedCheckpt = test.args.cachedCheckPoint
	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
	genesisState, err := util.NewBeaconState()
@@ -813,6 +829,12 @@ func TestVerifyBlkDescendant(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()

	fcs := protoarray.New(0, 0, [32]byte{'a'})
	opts := []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),
		WithForkChoiceStore(fcs),
	}
	b := util.NewBeaconBlock()
	b.Block.Slot = 1
	r, err := b.Block.HashTreeRoot()
@@ -867,9 +889,8 @@ func TestVerifyBlkDescendant(t *testing.T) {
		},
	}
	for _, tt := range tests {
		service, err := NewService(ctx)
		service, err := NewService(ctx, opts...)
		require.NoError(t, err)
		service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
		service.finalizedCheckpt = &ethpb.Checkpoint{
			Root: tt.args.finalizedRoot[:],
		}
@@ -883,12 +904,10 @@ func TestVerifyBlkDescendant(t *testing.T) {
}

func TestUpdateJustifiedInitSync(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()
	cfg := &config{BeaconDB: beaconDB}
	service, err := NewService(ctx)
	opts := testServiceOptsWithDB(t)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	gBlk := util.NewBeaconBlock()
	gRoot, err := gBlk.Block.HashTreeRoot()
@@ -898,7 +917,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
	beaconState, _ := util.DeterministicGenesisState(t, 32)
	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
	service.genesisRoot = gRoot
	service.originRoot = gRoot
	currentCp := &ethpb.Checkpoint{Epoch: 1}
	service.justifiedCheckpt = currentCp
	newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
@@ -914,10 +933,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) {

func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
	ctx := context.Background()
	cfg := &config{}
	service, err := NewService(ctx)
	opts := testServiceOptsNoDB()
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	s, err := util.NewBeaconState()
	require.NoError(t, err)
@@ -929,10 +947,9 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {

func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
	ctx := context.Background()
	cfg := &config{}
	service, err := NewService(ctx)
	opts := testServiceOptsNoDB()
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	s, _ := util.DeterministicGenesisState(t, 1024)
	service.head = &head{state: s}
@@ -944,18 +961,18 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	fcs := protoarray.New(0, 0, [32]byte{'a'})
	depositCache, err := depositcache.New()
	require.NoError(t, err)
	cfg := &config{
		BeaconDB:        beaconDB,
		StateGen:        stategen.New(beaconDB),
		ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
		DepositCache:    depositCache,
		StateNotifier:   &mock.MockStateNotifier{},
	opts := []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),
		WithForkChoiceStore(fcs),
		WithDepositCache(depositCache),
		WithStateNotifier(&mock.MockStateNotifier{}),
	}
	service, err := NewService(ctx)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	gs, keys := util.DeterministicGenesisState(t, 32)
	require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -989,18 +1006,12 @@ func TestOnBlock_CanFinalize(t *testing.T) {

func TestInsertFinalizedDeposits(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	opts := testServiceOptsWithDB(t)
	depositCache, err := depositcache.New()
	require.NoError(t, err)
	cfg := &config{
		BeaconDB:        beaconDB,
		StateGen:        stategen.New(beaconDB),
		ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
		DepositCache:    depositCache,
	}
	service, err := NewService(ctx)
	opts = append(opts, WithDepositCache(depositCache))
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	gs, _ := util.DeterministicGenesisState(t, 32)
	require.NoError(t, service.saveGenesisData(ctx, gs))

@@ -7,6 +7,10 @@ import (
	"time"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"

	"github.com/prysmaticlabs/prysm/async/event"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -14,8 +18,6 @@ import (
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/time/slots"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
@@ -103,39 +105,52 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
}

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
	// Wait for state to be initialized.
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
	subscribedToStateEvents <- struct{}{}
	<-stateChannel
	stateSub.Unsubscribe()

	if s.genesisTime.IsZero() {
		log.Warn("ProcessAttestations routine waiting for genesis time")
		for s.genesisTime.IsZero() {
			time.Sleep(1 * time.Second)
		}
		log.Warn("Genesis time received, now available to process attestations")
	}

	st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
	for {
	stateSub := stateFeed.Subscribe(stateChannel)
	go func() {
		select {
		case <-s.ctx.Done():
			stateSub.Unsubscribe()
			return
		case <-st.C():
			// Continue when there are no fork choice attestations; there is nothing to process and no head to update.
			// This covers the condition when the node is still initial syncing to the head of the chain.
			if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
				continue
		case <-stateChannel:
			stateSub.Unsubscribe()
			break
		}

		if s.genesisTime.IsZero() {
			log.Warn("ProcessAttestations routine waiting for genesis time")
			for s.genesisTime.IsZero() {
				time.Sleep(1 * time.Second)
			}
			s.processAttestations(s.ctx)
			if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
				log.Warnf("Resolving fork due to new attestation: %v", err)
			log.Warn("Genesis time received, now available to process attestations")
		}

		st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
		for {
			select {
			case <-s.ctx.Done():
				return
			case <-st.C():
				// Continue when there are no fork choice attestations; there is nothing to process and no head to update.
				// This covers the condition when the node is still initial syncing to the head of the chain.
				if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
					continue
				}
				s.processAttestations(s.ctx)

				balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
				if err != nil {
					log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
					continue
				}
				if err := s.updateHead(s.ctx, balances); err != nil {
					log.Warnf("Resolving fork due to new attestation: %v", err)
				}
			}
		}
	}
	}()
}
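The rewritten routine above first waits (inside a spawned goroutine) for either shutdown or a single event on the state feed, then processes pooled attestations and updates head once per slot tick until the service context is cancelled. A minimal, self-contained sketch of that wait-then-tick pattern, with a plain channel standing in for Prysm's event.Feed and time.Ticker standing in for slots.NewSlotTicker (all names here are invented for illustration):

package main

import (
	"context"
	"fmt"
	"time"
)

func spawnTickingRoutine(ctx context.Context, initialized <-chan struct{}, process func()) {
	go func() {
		// Block until either shutdown or the one-time initialization signal.
		select {
		case <-ctx.Done():
			return
		case <-initialized:
		}
		ticker := time.NewTicker(time.Second) // stands in for the per-slot ticker
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				process() // e.g. process pooled attestations, then update head
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	initialized := make(chan struct{})
	spawnTickingRoutine(ctx, initialized, func() { fmt.Println("tick") })
	close(initialized)          // simulate the statefeed.Initialized event
	time.Sleep(3 * time.Second) // let a few ticks fire
	cancel()
}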

// This processes fork choice attestations from the pool to account for validator votes and fork choice.

@@ -9,9 +9,7 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -42,12 +40,10 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {

func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	opts := testServiceOptsWithDB(t)

	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
	service, err := NewService(ctx)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	b32 := util.NewBeaconBlock()
	b32.Block.Slot = 32
@@ -71,12 +67,10 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {

func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)

	cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
	service, err := NewService(ctx)
	opts := testServiceOptsWithDB(t)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.cfg = cfg

	b32 := util.NewBeaconBlock()
	b32.Block.Slot = 32
@@ -101,18 +95,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
func TestProcessAttestations_Ok(t *testing.T) {
	hook := logTest.NewGlobal()
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	opts := testServiceOptsWithDB(t)
	opts = append(opts, WithAttestationPool(attestations.NewPool()))

	cfg := &config{
		BeaconDB:        beaconDB,
		ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
		StateGen:        stategen.New(beaconDB),
		AttPool:         attestations.NewPool(),
	}
	service, err := NewService(ctx)
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
	service.cfg = cfg
	genesisState, pks := util.DeterministicGenesisState(t, 64)
	require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
	require.NoError(t, service.saveGenesisData(ctx, genesisState))

@@ -124,21 +124,16 @@ func TestService_ReceiveBlock(t *testing.T) {
	genesisBlockRoot := bytesutil.ToBytes32(nil)
	require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))

	cfg := &config{
		BeaconDB: beaconDB,
		ForkChoiceStore: protoarray.New(
			0, // justifiedEpoch
			0, // finalizedEpoch
			genesisBlockRoot,
		),
		AttPool:       attestations.NewPool(),
		ExitPool:      voluntaryexits.NewPool(),
		StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
		StateGen:      stategen.New(beaconDB),
	opts := []Option{
		WithDatabase(beaconDB),
		WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
		WithAttestationPool(attestations.NewPool()),
		WithExitPool(voluntaryexits.NewPool()),
		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
		WithStateGen(stategen.New(beaconDB)),
	}
	s, err := NewService(ctx)
	s, err := NewService(ctx, opts...)
	require.NoError(t, err)
	s.cfg = cfg
	require.NoError(t, s.saveGenesisData(ctx, genesis))
	gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
	require.NoError(t, err)
@@ -166,21 +161,17 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	genesisBlockRoot := bytesutil.ToBytes32(nil)
	require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
	cfg := &config{
		BeaconDB: beaconDB,
		ForkChoiceStore: protoarray.New(
			0, // justifiedEpoch
			0, // finalizedEpoch
			genesisBlockRoot,
		),
		AttPool:       attestations.NewPool(),
		ExitPool:      voluntaryexits.NewPool(),
		StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
		StateGen:      stategen.New(beaconDB),
	opts := []Option{
		WithDatabase(beaconDB),
		WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
		WithAttestationPool(attestations.NewPool()),
		WithExitPool(voluntaryexits.NewPool()),
		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
		WithStateGen(stategen.New(beaconDB)),
	}
	s, err := NewService(ctx)

	s, err := NewService(ctx, opts...)
	require.NoError(t, err)
	s.cfg = cfg
	require.NoError(t, s.saveGenesisData(ctx, genesis))
	gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
	require.NoError(t, err)
@@ -250,19 +241,14 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
	require.NoError(t, err)
	cfg := &config{
		BeaconDB: beaconDB,
		ForkChoiceStore: protoarray.New(
			0, // justifiedEpoch
			0, // finalizedEpoch
			genesisBlockRoot,
		),
		StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
		StateGen:      stategen.New(beaconDB),
	opts := []Option{
		WithDatabase(beaconDB),
		WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
		WithStateGen(stategen.New(beaconDB)),
	}
	s, err := NewService(ctx)
	s, err := NewService(ctx, opts...)
	require.NoError(t, err)
	s.cfg = cfg
	err = s.saveGenesisData(ctx, genesis)
	require.NoError(t, err)
	gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
@@ -287,9 +273,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}

func TestService_HasInitSyncBlock(t *testing.T) {
	s, err := NewService(context.Background())
	opts := testServiceOptsNoDB()
	opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
	r := [32]byte{'a'}
	if s.HasInitSyncBlock(r) {
		t.Error("Should not have block")
@@ -301,11 +288,10 @@ func TestService_HasInitSyncBlock(t *testing.T) {
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	opts := testServiceOptsWithDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background())
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	s.cfg = &config{StateGen: stategen.New(beaconDB)}
	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
	s.finalizedCheckpt = &ethpb.Checkpoint{}
@@ -315,11 +301,10 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
}

func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background())
	opts := testServiceOptsWithDB(t)
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	s.cfg = &config{StateGen: stategen.New(beaconDB)}
	s.finalizedCheckpt = &ethpb.Checkpoint{}
	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	s.genesisTime = time.Now()
@@ -329,11 +314,10 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
}

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background())
	opts := testServiceOptsWithDB(t)
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	s.cfg = &config{StateGen: stategen.New(beaconDB)}
	s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
	s.genesisTime = time.Now()

@@ -9,6 +9,8 @@ import (
	"sync"
	"time"

	prysmTime "github.com/prysmaticlabs/prysm/time"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/async/event"
@@ -45,13 +47,14 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
	cfg         *config
	ctx         context.Context
	cancel      context.CancelFunc
	genesisTime time.Time
	head        *head
	headLock    sync.RWMutex
	genesisRoot [32]byte
	cfg         *config
	ctx         context.Context
	cancel      context.CancelFunc
	genesisTime time.Time
	head        *head
	headLock    sync.RWMutex
	// originRoot is the genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
	originRoot           [32]byte
	justifiedCheckpt     *ethpb.Checkpoint
	prevJustifiedCheckpt *ethpb.Checkpoint
	bestJustifiedCheckpt *ethpb.Checkpoint
@@ -62,9 +65,9 @@ type Service struct {
	checkpointStateCache  *cache.CheckpointStateCache
	initSyncBlocks        map[[32]byte]block.SignedBeaconBlock
	initSyncBlocksLock    sync.RWMutex
	justifiedBalances     []uint64
	justifiedBalancesLock sync.RWMutex
	wsVerifier            *WeakSubjectivityVerifier
	//justifiedBalances []uint64
	justifiedBalances *stateBalanceCache
	wsVerifier        *WeakSubjectivityVerifier
}

// config options for the service.
@@ -84,6 +87,7 @@ type config struct {
	StateGen                *stategen.State
	SlasherAttestationsFeed *event.Feed
	WeakSubjectivityCheckpt *ethpb.Checkpoint
	FinalizedStateAtStartUp state.BeaconState
}

// NewService instantiates a new block service instance that will
@@ -96,7 +100,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
		boundaryRoots:        [][32]byte{},
		checkpointStateCache: cache.NewCheckpointStateCache(),
		initSyncBlocks:       make(map[[32]byte]block.SignedBeaconBlock),
		justifiedBalances:    make([]uint64, 0),
		cfg:                  &config{},
	}
	for _, opt := range opts {
@@ -105,6 +108,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
		}
	}
	var err error
	if srv.justifiedBalances == nil {
		srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
		if err != nil {
			return nil, err
		}
	}
	srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
	if err != nil {
		return nil, err
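The constructor above sets defaults first and then applies each Option in turn, which is what lets every test in this diff swap direct config construction for an opts slice. A minimal sketch of that functional-options pattern, with invented option names and fields (not Prysm's actual API):

package main

import "fmt"

type config struct {
	maxRoutines int
	dbPath      string
}

type Service struct{ cfg *config }

// Option mutates the service at construction time, mirroring the
// WithDatabase / WithStateGen style used throughout the diff.
type Option func(*Service) error

func WithMaxRoutines(n int) Option {
	return func(s *Service) error { s.cfg.maxRoutines = n; return nil }
}

func WithDBPath(p string) Option {
	return func(s *Service) error { s.cfg.dbPath = p; return nil }
}

func NewService(opts ...Option) (*Service, error) {
	s := &Service{cfg: &config{}} // defaults first
	for _, opt := range opts {    // then apply каждое option? no: apply each option in order
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := NewService(WithMaxRoutines(100), WithDBPath("/tmp/db"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *s.cfg)
}

One design payoff visible in the diff: options that are left unset (like the justified balance cache) can be filled in with sensible defaults after the option loop runs.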
@@ -114,148 +123,249 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a blockchain service's main event loop.
func (s *Service) Start() {
	// For running initial sync with the state cache, in the event of a restart, we use
	// the last finalized checkpoint as the starting point to sync instead of head
	// state. This is because we no longer save state every slot during sync.
	cp, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
	if err != nil {
		log.Fatalf("Could not fetch finalized cp: %v", err)
	}
	saved := s.cfg.FinalizedStateAtStartUp

	r := bytesutil.ToBytes32(cp.Root)
	// Before the first finalized epoch, in the current epoch,
	// the finalized root is defined as zero hashes instead of the genesis root hash.
	// We want to use the genesis root to retrieve the state.
	if r == params.BeaconConfig().ZeroHash {
		genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
		if err != nil {
			log.Fatalf("Could not fetch finalized cp: %v", err)
	if saved != nil && !saved.IsNil() {
		if err := s.startFromSavedState(saved); err != nil {
			log.Fatal(err)
		}
		if genesisBlock != nil && !genesisBlock.IsNil() {
			r, err = genesisBlock.Block().HashTreeRoot()
			if err != nil {
				log.Fatalf("Could not tree hash genesis block: %v", err)
			}
		}
	}
	beaconState, err := s.cfg.StateGen.StateByRoot(s.ctx, r)
	if err != nil {
		log.Fatalf("Could not fetch beacon state by root: %v", err)
	}

	// Make sure that the attestation processor is subscribed and ready for the state initializing event.
	attestationProcessorSubscribed := make(chan struct{}, 1)

	// If the chain has already been initialized, simply start the block processing routine.
	if beaconState != nil && !beaconState.IsNil() {
		log.Info("Blockchain data already exists in DB, initializing...")
		s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
		s.cfg.AttService.SetGenesisTime(beaconState.GenesisTime())
		if err := s.initializeChainInfo(s.ctx); err != nil {
			log.Fatalf("Could not set up chain info: %v", err)
		}

		// We start a counter to genesis, if needed.
		gState, err := s.cfg.BeaconDB.GenesisState(s.ctx)
		if err != nil {
			log.Fatalf("Could not retrieve genesis state: %v", err)
		}
		gRoot, err := gState.HashTreeRoot(s.ctx)
		if err != nil {
			log.Fatalf("Could not hash tree root genesis state: %v", err)
		}
		go slots.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)

		justifiedCheckpoint, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
		if err != nil {
			log.Fatalf("Could not get justified checkpoint: %v", err)
		}
		finalizedCheckpoint, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
		if err != nil {
			log.Fatalf("Could not get finalized checkpoint: %v", err)
		}

		// Resume fork choice.
		s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
		if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
			log.Fatalf("Could not cache justified state balances: %v", err)
		}
		s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
		s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
		s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
		s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
		s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)

		ss, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
		if err != nil {
			log.Fatalf("Could not get start slot of finalized epoch: %v", err)
		}
		h := s.headBlock().Block()
		if h.Slot() > ss {
			log.WithFields(logrus.Fields{
				"startSlot": ss,
				"endSlot":   h.Slot(),
			}).Info("Loading blocks to fork choice store, this may take a while.")
			if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
				log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
			}
		}

		// not attempting to save initial sync blocks here, because there shouldn't be any until
		// after the statefeed.Initialized event is fired (below)
		if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
			// Exit run time if the node failed to verify the weak subjectivity checkpoint.
			log.Fatalf("Could not verify initial checkpoint provided for chain sync: %v", err)
		}

		s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
			Type: statefeed.Initialized,
			Data: &statefeed.InitializedData{
				StartTime:             s.genesisTime,
				GenesisValidatorsRoot: beaconState.GenesisValidatorRoot(),
			},
		})
	} else {
		log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
		if s.cfg.ChainStartFetcher == nil {
			log.Fatal("Not configured web3Service for POW chain")
			return // return needed for TestStartUninitializedChainWithoutConfigPOWChain.
		if err := s.startFromPOWChain(); err != nil {
			log.Fatal(err)
		}
		go func() {
			stateChannel := make(chan *feed.Event, 1)
			stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
			defer stateSub.Unsubscribe()
			<-attestationProcessorSubscribed
			for {
				select {
				case event := <-stateChannel:
					if event.Type == statefeed.ChainStarted {
						data, ok := event.Data.(*statefeed.ChainStartedData)
						if !ok {
							log.Error("event data is not type *statefeed.ChainStartedData")
							return
						}
						log.WithField("starttime", data.StartTime).Debug("Received chain start event")
						s.processChainStartTime(s.ctx, data.StartTime)
						return
					}
				case <-s.ctx.Done():
					log.Debug("Context closed, exiting goroutine")
					return
				case err := <-stateSub.Err():
					log.WithError(err).Error("Subscription to state notifier failed")
					return
				}
			}
		}()
	}

	go s.processAttestationsRoutine(attestationProcessorSubscribed)
}


// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
// Stop the blockchain service's main event loop and associated goroutines.
func (s *Service) Stop() error {
	defer s.cancel()

	if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
		if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
			return err
		}
	}

	// Save initial sync cached blocks to the DB before stop.
	return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}

// Status always returns nil unless there is an error condition that causes
// this service to be unhealthy.
func (s *Service) Status() error {
	if s.originRoot == params.BeaconConfig().ZeroHash {
		return errors.New("genesis state has not been created")
	}
	if runtime.NumGoroutine() > s.cfg.MaxRoutines {
		return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
	}
	return nil
}

func (s *Service) startFromSavedState(saved state.BeaconState) error {
	log.Info("Blockchain data already exists in DB, initializing...")
	s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
	s.cfg.AttService.SetGenesisTime(saved.GenesisTime())

	originRoot, err := s.originRootFromSavedState(s.ctx)
	if err != nil {
		return err
	}
	s.originRoot = originRoot

	if err := s.initializeHeadFromDB(s.ctx); err != nil {
		return errors.Wrap(err, "could not set up chain info")
	}
	spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)

	justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
	if err != nil {
		return errors.Wrap(err, "could not get justified checkpoint")
	}
	s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justified)
	s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justified)
	s.justifiedCheckpt = ethpb.CopyCheckpoint(justified)

	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
	if err != nil {
		return errors.Wrap(err, "could not get finalized checkpoint")
	}
	s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(finalized)
	s.finalizedCheckpt = ethpb.CopyCheckpoint(finalized)

	store := protoarray.New(justified.Epoch, finalized.Epoch, bytesutil.ToBytes32(finalized.Root))
	s.cfg.ForkChoiceStore = store

	ss, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
	if err != nil {
		return errors.Wrap(err, "could not get start slot of finalized epoch")
	}
	h := s.headBlock().Block()
	if h.Slot() > ss {
		log.WithFields(logrus.Fields{
			"startSlot": ss,
			"endSlot":   h.Slot(),
		}).Info("Loading blocks to fork choice store, this may take a while.")
		if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
			return errors.Wrap(err, "could not fill in fork choice store missing blocks")
		}
	}

	// not attempting to save initial sync blocks here, because there shouldn't be any until
	// after the statefeed.Initialized event is fired (below)
	if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
		// Exit run time if the node failed to verify weak subjectivity checkpoint.
		return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
	}

	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.Initialized,
		Data: &statefeed.InitializedData{
			StartTime:             s.genesisTime,
			GenesisValidatorsRoot: saved.GenesisValidatorRoot(),
		},
	})

	s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())

	return nil
}

func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error) {
	// first check if we have started from checkpoint sync and have a root
	originRoot, err := s.cfg.BeaconDB.OriginCheckpointRoot(ctx)
	if err == nil {
		return originRoot, nil
	}
	// if it's an ErrNotFound, that probably just means the node was synced from genesis
	if !errors.Is(err, db.ErrNotFound) {
		return originRoot, errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
	}

	// if the chain started from a genesis state, we should have a value for GenesisBlock
	genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
	if err != nil {
		return originRoot, errors.Wrap(err, "could not get genesis block from db")
	}
	if err := helpers.BeaconBlockIsNil(genesisBlock); err != nil {
		return originRoot, err
	}
	genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
	if err != nil {
		return genesisBlkRoot, errors.Wrap(err, "could not get signing root of genesis block")
	}
	return genesisBlkRoot, nil
}

// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
// Note that this may block until stategen replays blocks between the finalized and head blocks,
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
func (s *Service) initializeHeadFromDB(ctx context.Context) error {
	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get finalized checkpoint from db")
	}
	if finalized == nil {
		// This should never happen. At chain start, the finalized checkpoint
		// would be the genesis state and block.
		return errors.New("no finalized epoch in the database")
	}
	finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
	var finalizedState state.BeaconState

	finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
	if err != nil {
		return errors.Wrap(err, "could not get finalized state from db")
	}

	if flags.Get().HeadSync {
		headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
		if err != nil {
			return errors.Wrap(err, "could not retrieve head block")
		}
		headEpoch := slots.ToEpoch(headBlock.Block().Slot())
		var epochsSinceFinality types.Epoch
		if headEpoch > finalized.Epoch {
			epochsSinceFinality = headEpoch - finalized.Epoch
		}
		// Head sync when the node is far enough beyond the known finalized epoch;
		// this becomes really useful during long periods of non-finality.
		if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
			headRoot, err := headBlock.Block().HashTreeRoot()
			if err != nil {
				return errors.Wrap(err, "could not hash head block")
			}
			finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
			if err != nil {
				return errors.Wrap(err, "could not get finalized state from db")
			}
			log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d. "+
				"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block().Slot())
			headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
			if err != nil {
				return errors.Wrap(err, "could not retrieve head state")
			}
			s.setHead(headRoot, headBlock, headState)
			return nil
		} else {
			log.Warnf("Finalized checkpoint at slot %d is too close to the current head slot, "+
				"resetting head from the checkpoint ('--%s' flag is ignored).",
				finalizedState.Slot(), flags.HeadSync.Name)
		}
	}

	finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
	if err != nil {
		return errors.Wrap(err, "could not get finalized block from db")
	}

	if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
		return errors.New("finalized state and block can't be nil")
	}
	s.setHead(finalizedRoot, finalizedBlock, finalizedState)

	return nil
}
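The head-sync branch above only triggers when the head block's epoch is at least headSyncMinEpochsAfterCheckpoint (128) past the finalized epoch; otherwise the flag is ignored and the head is reset to the checkpoint. A small sketch of that guard, with the epoch types simplified to uint64 for illustration:

package main

import "fmt"

const headSyncMinEpochsAfterCheckpoint = 128 // constant from the diff above

// shouldHeadSync mirrors the guard in initializeHeadFromDB: head sync only
// kicks in when the head block is far enough past the finalized checkpoint.
func shouldHeadSync(headEpoch, finalizedEpoch uint64) bool {
	var epochsSinceFinality uint64
	if headEpoch > finalizedEpoch {
		epochsSinceFinality = headEpoch - finalizedEpoch
	}
	return epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint
}

func main() {
	fmt.Println(shouldHeadSync(100, 90))  // false: only 10 epochs past finality
	fmt.Println(shouldHeadSync(300, 100)) // true: 200 >= 128, --head-sync honored
}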

func (s *Service) startFromPOWChain() error {
	log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
	if s.cfg.ChainStartFetcher == nil {
		return errors.New("not configured web3Service for POW chain")
	}
	go func() {
		stateChannel := make(chan *feed.Event, 1)
		stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
		defer stateSub.Unsubscribe()
		s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
		for {
			select {
			case event := <-stateChannel:
				if event.Type == statefeed.ChainStarted {
					data, ok := event.Data.(*statefeed.ChainStartedData)
					if !ok {
						log.Error("event data is not type *statefeed.ChainStartedData")
						return
					}
					log.WithField("starttime", data.StartTime).Debug("Received chain start event")
					s.onPowchainStart(s.ctx, data.StartTime)
					return
				}
			case <-s.ctx.Done():
				log.Debug("Context closed, exiting goroutine")
				return
			case err := <-stateSub.Err():
				log.WithError(err).Error("Subscription to state notifier failed")
				return
			}
		}
	}()

	return nil
}

// onPowchainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
	preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
	initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
	if err != nil {
@@ -319,32 +429,6 @@ func (s *Service) initializeBeaconChain(
	return genesisState, nil
}

// Stop the blockchain service's main event loop and associated goroutines.
func (s *Service) Stop() error {
	defer s.cancel()

	if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
		if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
			return err
		}
	}

	// Save initial sync cached blocks to the DB before stop.
	return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}

// Status always returns nil unless there is an error condition that causes
// this service to be unhealthy.
func (s *Service) Status() error {
	if s.genesisRoot == params.BeaconConfig().ZeroHash {
		return errors.New("genesis state has not been created")
	}
	if runtime.NumGoroutine() > s.cfg.MaxRoutines {
		return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
	}
	return nil
}

// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
	if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
@@ -359,16 +443,13 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
		return errors.Wrap(err, "could not get genesis block root")
	}

	s.genesisRoot = genesisBlkRoot
	s.originRoot = genesisBlkRoot
	s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

	// Finalized checkpoint at genesis is a zero hash.
	genesisCheckpoint := genesisState.FinalizedCheckpoint()

	s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
	if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
		return err
	}
	s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
	s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
	s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
@@ -388,94 +469,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
	return nil
}

// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
func (s *Service) initializeChainInfo(ctx context.Context) error {
	genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get genesis block from db")
	}
	if err := helpers.BeaconBlockIsNil(genesisBlock); err != nil {
		return err
	}
	genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
	if err != nil {
		return errors.Wrap(err, "could not get signing root of genesis block")
	}
	s.genesisRoot = genesisBlkRoot

	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get finalized checkpoint from db")
	}
	if finalized == nil {
		// This should never happen. At chain start, the finalized checkpoint
		// would be the genesis state and block.
		return errors.New("no finalized epoch in the database")
	}
	finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
	var finalizedState state.BeaconState

	finalizedState, err = s.cfg.StateGen.Resume(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get finalized state from db")
	}

	if flags.Get().HeadSync {
		headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
		if err != nil {
			return errors.Wrap(err, "could not retrieve head block")
		}
		headEpoch := slots.ToEpoch(headBlock.Block().Slot())
		var epochsSinceFinality types.Epoch
		if headEpoch > finalized.Epoch {
			epochsSinceFinality = headEpoch - finalized.Epoch
		}
		// Head sync when the node is far enough beyond the known finalized epoch;
		// this becomes really useful during long periods of non-finality.
		if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
			headRoot, err := headBlock.Block().HashTreeRoot()
			if err != nil {
				return errors.Wrap(err, "could not hash head block")
			}
			finalizedState, err := s.cfg.StateGen.Resume(ctx)
			if err != nil {
				return errors.Wrap(err, "could not get finalized state from db")
			}
			log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d. "+
				"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block().Slot())
			headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
			if err != nil {
				return errors.Wrap(err, "could not retrieve head state")
			}
			s.setHead(headRoot, headBlock, headState)
			return nil
		} else {
			log.Warnf("Finalized checkpoint at slot %d is too close to the current head slot, "+
				"resetting head from the checkpoint ('--%s' flag is ignored).",
				finalizedState.Slot(), flags.HeadSync.Name)
		}
	}

	finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
	if err != nil {
		return errors.Wrap(err, "could not get finalized block from db")
	}

	if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
		return errors.New("finalized state and block can't be nil")
	}
	s.setHead(finalizedRoot, finalizedBlock, finalizedState)

	return nil
}

// This is called when a client starts from a non-genesis slot. It passes the last justified and finalized
// information to the fork choice service to initialize the fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
	store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
	s.cfg.ForkChoiceStore = store
}

// This returns true if the block has been processed before. Two ways to verify the block has been processed:
// 1.) Check fork choice store.
// 2.) Check DB.
@@ -487,3 +480,20 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {

	return s.cfg.BeaconDB.HasBlock(ctx, root)
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
	currentTime := prysmTime.Now()
	if currentTime.After(genesisTime) {
		return
	}

	gState, err := db.GenesisState(ctx)
	if err != nil {
		log.Fatalf("Could not retrieve genesis state: %v", err)
	}
	gRoot, err := gState.HashTreeRoot(ctx)
	if err != nil {
		log.Fatalf("Could not hash tree root genesis state: %v", err)
	}
	go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}

@@ -9,6 +9,7 @@ import (

	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/async/event"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -94,11 +95,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		DepositContainers: []*ethpb.DepositContainer{},
	})
	require.NoError(t, err)
	web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
		BeaconDB:        beaconDB,
		HttpEndpoints:   []string{endpoint},
		DepositContract: common.Address{},
	})
	web3Service, err = powchain.NewService(
		ctx,
		powchain.WithDatabase(beaconDB),
		powchain.WithHttpEndpoints([]string{endpoint}),
		powchain.WithDepositContractAddress(common.Address{}),
	)
	require.NoError(t, err, "Unable to set up web3 service")

	attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
@@ -107,25 +109,24 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
	depositCache, err := depositcache.New()
	require.NoError(t, err)

	cfg := &config{
		BeaconBlockBuf:    0,
		BeaconDB:          beaconDB,
		DepositCache:      depositCache,
		ChainStartFetcher: web3Service,
		P2p:               &mockBroadcaster{},
		StateNotifier:     &mockBeaconNode{},
		AttPool:           attestations.NewPool(),
		StateGen:          stategen.New(beaconDB),
		ForkChoiceStore:   protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
		AttService:        attService,
	stateGen := stategen.New(beaconDB)
	// Save a state in stategen for purposes of testing a service stop / shutdown.
	require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

	opts := []Option{
		WithDatabase(beaconDB),
		WithDepositCache(depositCache),
		WithChainStartFetcher(web3Service),
		WithAttestationPool(attestations.NewPool()),
		WithP2PBroadcaster(&mockBroadcaster{}),
		WithStateNotifier(&mockBeaconNode{}),
		WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
		WithAttestationService(attService),
		WithStateGen(stateGen),
	}

	// Save a state in stategen for purposes of testing a service stop / shutdown.
	require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

	chainService, err := NewService(ctx)
	chainService, err := NewService(ctx, opts...)
	require.NoError(t, err, "Unable to set up chain service")
	chainService.cfg = cfg
	chainService.genesisTime = time.Unix(1, 0) // non-zero time

	return chainService
@@ -150,7 +151,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
	require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))

	chainService.cfg.FinalizedStateAtStartUp = s
	// Test the start function.
	chainService.Start()

@@ -177,7 +178,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
	require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
	require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))

	chainService.cfg.FinalizedStateAtStartUp = s
	// Test the start function.
	chainService.Start()

@@ -248,7 +249,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))

	chainService.cfg.FinalizedStateAtStartUp = s
	// Test the start function.
	chainService.Start()

@@ -283,8 +284,11 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
	require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
	c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
	require.NoError(t, c.initializeChainInfo(ctx))
	attSrv, err := attestations.NewService(ctx, &attestations.Config{})
	require.NoError(t, err)
	c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
	require.NoError(t, err)
	require.NoError(t, c.startFromSavedState(headState))
	headBlk, err := c.HeadBlock(ctx)
	require.NoError(t, err)
	assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
@@ -297,7 +301,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
	if !bytes.Equal(headRoot[:], r) {
		t.Error("head slot incorrect")
	}
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	assert.Equal(t, genesisRoot, c.originRoot, "Genesis block root incorrect")
}

func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
@@ -323,12 +327,15 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
	require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
	require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
	c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
	require.NoError(t, c.initializeChainInfo(ctx))
	attSrv, err := attestations.NewService(ctx, &attestations.Config{})
	require.NoError(t, err)
	c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}))
	require.NoError(t, err)
	require.NoError(t, c.startFromSavedState(headState))
	s, err := c.HeadState(ctx)
	require.NoError(t, err)
	assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	assert.Equal(t, genesisRoot, c.originRoot, "Genesis block root incorrect")
	assert.DeepEqual(t, genesis, c.head.block.Proto())
}

@@ -380,13 +387,15 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
		Root: finalizedRoot[:],
	}))

	c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}

	require.NoError(t, c.initializeChainInfo(ctx))
	attSrv, err := attestations.NewService(ctx, &attestations.Config{})
	require.NoError(t, err)
	c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
	require.NoError(t, err)
	require.NoError(t, c.startFromSavedState(headState))
	s, err := c.HeadState(ctx)
	require.NoError(t, err)
	assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	assert.Equal(t, genesisRoot, c.originRoot, "Genesis block root incorrect")
	// Since head sync is not triggered, the chain is initialized to the last finalized checkpoint.
	assert.DeepEqual(t, finalizedBlock, c.head.block.Proto())
	assert.LogsContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
@@ -403,11 +412,11 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))

	hook.Reset()
	require.NoError(t, c.initializeChainInfo(ctx))
	require.NoError(t, c.initializeHeadFromDB(ctx))
	s, err = c.HeadState(ctx)
	require.NoError(t, err)
	assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	assert.Equal(t, genesisRoot, c.originRoot, "Genesis block root incorrect")
	// Head slot is far beyond the latest finalized checkpoint, so head sync is triggered.
	assert.DeepEqual(t, headBlock, c.head.block.Proto())
	assert.LogsContain(t, hook, "Regenerating state from the last checkpoint at slot 225")
@@ -477,7 +486,7 @@ func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
	stateChannel := make(chan *feed.Event, 1)
	stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
	defer stateSub.Unsubscribe()
	service.processChainStartTime(context.Background(), time.Now())
	service.onPowchainStart(context.Background(), time.Now())

	stateEvent := <-stateChannel
	require.Equal(t, int(stateEvent.Type), statefeed.Initialized)

beacon-chain/blockchain/state_balance_cache.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package blockchain

import (
	"context"
	"errors"
	"sync"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

var errNilStateFromStategen = errors.New("justified state can't be nil")

type stateBalanceCache struct {
	sync.Mutex
	balances []uint64
	root     [32]byte
	stateGen stateByRooter
}

type stateByRooter interface {
	StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}

// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
	if sg == nil {
		return nil, errors.New("can't initialize state balance cache without stategen")
	}
	return &stateBalanceCache{stateGen: sg}, nil
}

// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, it relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
	stateBalanceCacheMiss.Inc()
	justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
	if err != nil {
		return nil, err
	}
	if justifiedState == nil || justifiedState.IsNil() {
		return nil, errNilStateFromStategen
	}
	epoch := time.CurrentEpoch(justifiedState)

	justifiedBalances := make([]uint64, justifiedState.NumValidators())
	var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
		if helpers.IsActiveValidatorUsingTrie(val, epoch) {
			justifiedBalances[idx] = val.EffectiveBalance()
		} else {
			justifiedBalances[idx] = 0
		}
		return nil
	}
	if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
		return nil, err
	}

	c.balances = justifiedBalances
	c.root = justifiedRoot
	return c.balances, nil
}

// get takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
	c.Lock()
	defer c.Unlock()
	if justifiedRoot == c.root {
		stateBalanceCacheHit.Inc()
		return c.balances, nil
	}

	return c.update(ctx, justifiedRoot)
}
|
||||
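The cache's contract is easiest to see from the call side. Below is a minimal sketch of how a consumer might wire it up; the sg parameter standing in for the node's stategen service and the surrounding function are assumptions for illustration, not wiring that appears in this changeset.

    // Sketch only: how a caller could obtain justified balances through the cache.
    // get() locks internally and only hits stategen when justifiedRoot changes.
    func exampleJustifiedBalances(ctx context.Context, sg *stategen.State, justifiedRoot [32]byte) ([]uint64, error) {
    	cache, err := newStateBalanceCache(sg)
    	if err != nil {
    		return nil, err // a nil stategen is rejected up front, per the constructor
    	}
    	return cache.get(ctx, justifiedRoot)
    }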
225  beacon-chain/blockchain/state_balance_cache_test.go  Normal file
@@ -0,0 +1,225 @@
package blockchain

import (
	"context"
	"encoding/binary"
	"errors"
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/time/slots"
)

type mockStateByRooter struct {
	state state.BeaconState
	err   error
}

func (m *mockStateByRooter) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
	return m.state, m.err
}

type testStateOpt func(*ethpb.BeaconStateAltair)

func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
	return func(a *ethpb.BeaconStateAltair) {
		a.Validators = v
	}
}

func testStateWithSlot(slot types.Slot) testStateOpt {
	return func(a *ethpb.BeaconStateAltair) {
		a.Slot = slot
	}
}

func testStateFixture(opts ...testStateOpt) state.BeaconState {
	a := &ethpb.BeaconStateAltair{}
	for _, o := range opts {
		o(a)
	}
	s, _ := v2.InitializeFromProtoUnsafe(a)
	return s
}

func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
	vs := make([]*ethpb.Validator, count)
	var i uint32 = 0
	for ; i < uint32(count); i++ {
		pk := make([]byte, 48)
		binary.LittleEndian.PutUint32(pk, i)
		v := &ethpb.Validator{PublicKey: pk}
		for _, o := range opts {
			o(v)
		}
		vs[i] = v
	}
	return vs
}

func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
	return func(v *ethpb.Validator) {
		pki := binary.LittleEndian.Uint64(v.PublicKey)
		if pki%2 == 0 {
			v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
		} else {
			v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
		}
	}
}

func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
	return func(v *ethpb.Validator) {
		v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
		pki := binary.LittleEndian.Uint64(v.PublicKey)
		if pki%2 == 0 {
			v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
		} else {
			v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
		}
	}
}

func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
	return func(v *ethpb.Validator) {
		v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
		v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
	}
}

func balanceIsKeyTimes2(v *ethpb.Validator) {
	pki := binary.LittleEndian.Uint64(v.PublicKey)
	v.EffectiveBalance = uint64(pki) * 2
}

func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
	balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
	return generateTestValidators(10,
		oddValidatorsExpired(types.Slot(99)),
		balanceIsKeyTimes2), balances
}

func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
	balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
	return generateTestValidators(10,
		oddValidatorsQueued(types.Slot(99)),
		balanceIsKeyTimes2), balances
}

func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
	balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
	return generateTestValidators(10,
		allValidatorsValid(types.Slot(99)),
		balanceIsKeyTimes2), balances
}

func TestStateBalanceCache(t *testing.T) {
	type sbcTestCase struct {
		err      error
		root     [32]byte
		sbc      *stateBalanceCache
		balances []uint64
		name     string
	}
	sentinelCacheMiss := errors.New("Cache missed, as expected!")
	sentinelBalances := []uint64{1, 2, 3, 4, 5}
	halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
	halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
	allValidValidators, allValidBalances := testAllValidValidators()
	cases := []sbcTestCase{
		{
			root:     bytesutil.ToBytes32([]byte{'A'}),
			balances: sentinelBalances,
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					err: sentinelCacheMiss,
				},
				root:     bytesutil.ToBytes32([]byte{'A'}),
				balances: sentinelBalances,
			},
			name: "cache hit",
		},
		// This works by using a stateByRooter that returns a known error,
		// so really we're testing the miss by making sure stategen got called.
		// This also tells us stategen errors are propagated.
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					err: sentinelCacheMiss,
				},
				root: bytesutil.ToBytes32([]byte{'B'}),
			},
			err:  sentinelCacheMiss,
			root: bytesutil.ToBytes32([]byte{'A'}),
			name: "cache miss",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{},
				root:     bytesutil.ToBytes32([]byte{'B'}),
			},
			err:  errNilStateFromStategen,
			root: bytesutil.ToBytes32([]byte{'A'}),
			name: "error for nil state upon cache miss",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					state: testStateFixture(
						testStateWithSlot(99),
						testStateWithValidators(halfExpiredValidators)),
				},
			},
			balances: halfExpiredBalances,
			root:     bytesutil.ToBytes32([]byte{'A'}),
			name:     "test filtering by exit epoch",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					state: testStateFixture(
						testStateWithSlot(99),
						testStateWithValidators(halfQueuedValidators)),
				},
			},
			balances: halfQueuedBalances,
			root:     bytesutil.ToBytes32([]byte{'A'}),
			name:     "test filtering by activation epoch",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					state: testStateFixture(
						testStateWithSlot(99),
						testStateWithValidators(allValidValidators)),
				},
			},
			balances: allValidBalances,
			root:     bytesutil.ToBytes32([]byte{'A'}),
			name:     "happy path",
		},
	}
	ctx := context.Background()
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			cache := c.sbc
			cacheRootStart := cache.root
			b, err := cache.get(ctx, c.root)
			require.ErrorIs(t, err, c.err)
			require.DeepEqual(t, c.balances, b)
			if c.err != nil {
				// If there was an error somewhere, the root should not have changed (unless it already matched).
				require.Equal(t, cacheRootStart, cache.root)
			} else {
				// When successful, the cache should always end with a root matching the request.
				require.Equal(t, c.root, cache.root)
			}
		})
	}
}
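A note on exercising these table-driven cases: assuming a plain Go toolchain checkout (the repository also builds under Bazel, where the package's bazel test target applies instead), something like the following should run only this cache's tests:

    go test ./beacon-chain/blockchain/ -run TestStateBalanceCache -v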
@@ -5,7 +5,6 @@
package depositcache

import (
	"bytes"
	"context"
	"encoding/hex"
	"math/big"

@@ -54,6 +53,7 @@ type DepositCache struct {
	pendingDeposits   []*dbpb.DepositContainer
	deposits          []*dbpb.DepositContainer
	finalizedDeposits *FinalizedDeposits
	depositsByKey     map[[48]byte][]*dbpb.DepositContainer
	depositsLock      sync.RWMutex
}

@@ -69,6 +69,7 @@ func New() (*DepositCache, error) {
	return &DepositCache{
		pendingDeposits:   []*dbpb.DepositContainer{},
		deposits:          []*dbpb.DepositContainer{},
		depositsByKey:     map[[48]byte][]*ethpb.DepositContainer{},
		finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
	}, nil
}

@@ -95,10 +96,15 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
}
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
depCtr := &dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
newDeposits := append(
	[]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}},
	[]*dbpb.DepositContainer{depCtr},
	dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
// Append the deposit to our map; in the event no deposits
// exist for the pubkey, it is simply added to the map.
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
historicalDepositsCount.Inc()
return nil
}
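The sort.Search call above is a binary-search insertion into an index-sorted slice: find the first position whose Index is not less than the new one, then splice the new container in. For reference, here is the same pattern as a self-contained sketch on plain ints rather than the deposit types:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // insertSorted keeps xs ascending, mirroring the sort.Search pattern above.
    func insertSorted(xs []int, x int) []int {
    	i := sort.Search(len(xs), func(j int) bool { return xs[j] >= x })
    	// Build the new tail first so the in-place append below cannot clobber it.
    	tail := append([]int{x}, xs[i:]...)
    	return append(xs[:i], tail...)
    }

    func main() {
    	fmt.Println(insertSorted([]int{1, 3, 4}, 2)) // [1 2 3 4]
    }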
@@ -112,6 +118,13 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbp

sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
dc.deposits = ctrs
for _, c := range ctrs {
	// Use a new value, as the reference
	// of c changes in the next iteration.
	newPtr := c
	pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
	dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
}
historicalDepositsCount.Add(float64(len(ctrs)))
}

@@ -202,13 +215,15 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et

var deposit *ethpb.Deposit
var blockNum *big.Int
for _, ctnr := range dc.deposits {
	if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
		deposit = ctnr.Deposit
		blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
		break
	}
deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
if !ok || len(deps) == 0 {
	return deposit, blockNum
}
// We always return the first deposit if a particular
// validator key has multiple deposits assigned to
// it.
deposit = deps[0].Deposit
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
return deposit, blockNum
}
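The retrieval change above swaps a linear scan of every container for a single map probe keyed by the 48-byte pubkey, turning DepositByPubkey from O(n) in total deposits into an O(1) lookup. A reduced sketch of the idea, with a hypothetical container type standing in for dbpb.DepositContainer and a 2-byte key standing in for the real BLS pubkey:

    package main

    import "fmt"

    // container is a stand-in for the deposit container type in this diff.
    type container struct {
    	pubkey [2]byte
    	height uint64
    }

    func main() {
    	byKey := map[[2]byte][]*container{}
    	for _, c := range []*container{
    		{pubkey: [2]byte{1}, height: 9},
    		{pubkey: [2]byte{1}, height: 10},
    		{pubkey: [2]byte{2}, height: 11},
    	} {
    		byKey[c.pubkey] = append(byKey[c.pubkey], c)
    	}
    	// First-inserted wins, matching DepositByPubkey's "first matching deposit" contract.
    	if deps, ok := byKey[[2]byte{1}]; ok {
    		fmt.Println(deps[0].height) // 9
    	}
    }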
@@ -44,31 +44,31 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
}{
	{
		blkNum:      0,
		deposit:     &ethpb.Deposit{},
		deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'A'}}},
		index:       0,
		expectedErr: "",
	},
	{
		blkNum:      0,
		deposit:     &ethpb.Deposit{},
		deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'B'}}},
		index:       3,
		expectedErr: "wanted deposit with index 1 to be inserted but received 3",
	},
	{
		blkNum:      0,
		deposit:     &ethpb.Deposit{},
		deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'C'}}},
		index:       1,
		expectedErr: "",
	},
	{
		blkNum:      0,
		deposit:     &ethpb.Deposit{},
		deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'D'}}},
		index:       4,
		expectedErr: "wanted deposit with index 2 to be inserted but received 4",
	},
	{
		blkNum:      0,
		deposit:     &ethpb.Deposit{},
		deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'E'}}},
		index:       2,
		expectedErr: "",
	},

@@ -316,8 +316,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
	dc, err := New()
	require.NoError(t, err)

	dc.deposits = []*dbpb.DepositContainer{
	ctrs := []*dbpb.DepositContainer{
		{
			Eth1BlockHeight: 9,
			Deposit: &ethpb.Deposit{
@@ -359,6 +358,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
			},
		},
	}
	dc.InsertDepositContainers(context.Background(), ctrs)

	pk1 := bytesutil.PadTo([]byte("pk1"), 48)
	dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
@@ -626,24 +626,28 @@ func TestPruneProofs_Ok(t *testing.T) {
	index int64
}{
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   0,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
		index: 0,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   1,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
		index: 1,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   2,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
		index: 2,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   3,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
		index: 3,
	},
}

@@ -669,24 +673,26 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
	index int64
}{
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: nil},
		index:   0,
		blkNum: 0,
		deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
			PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
		index: 0,
	},
	{
		blkNum: 0,
		deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
			PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: nil},
		index:   1,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		deposit: &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
		index: 2,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   3,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
		index: 3,
	},
}

@@ -709,24 +715,28 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
	index int64
}{
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   0,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
		index: 0,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   1,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
		index: 1,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   2,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
		index: 2,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   3,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
		index: 3,
	},
}

@@ -752,24 +762,28 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
	index int64
}{
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   0,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
		index: 0,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   1,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
		index: 1,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   2,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
		index: 2,
	},
	{
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof()},
		index:   3,
		blkNum:  0,
		deposit: &ethpb.Deposit{Proof: makeDepositProof(),
			Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
		index: 3,
	},
}

@@ -785,6 +799,36 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
	assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestDepositMap_WorksCorrectly(t *testing.T) {
	dc, err := New()
	require.NoError(t, err)

	pk0 := bytesutil.PadTo([]byte("pk0"), 48)
	dep, _ := dc.DepositByPubkey(context.Background(), pk0)
	var nilDep *ethpb.Deposit
	assert.DeepEqual(t, nilDep, dep)

	dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
	assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))

	dep, _ = dc.DepositByPubkey(context.Background(), pk0)
	assert.NotEqual(t, nilDep, dep)
	assert.Equal(t, uint64(1000), dep.Data.Amount)

	dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
	assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))

	// Make sure we have the same deposit returned over here.
	dep, _ = dc.DepositByPubkey(context.Background(), pk0)
	assert.NotEqual(t, nilDep, dep)
	assert.Equal(t, uint64(1000), dep.Data.Amount)

	// Make sure another key doesn't work.
	pk1 := bytesutil.PadTo([]byte("pk1"), 48)
	dep, _ = dc.DepositByPubkey(context.Background(), pk1)
	assert.DeepEqual(t, nilDep, dep)
}

func makeDepositProof() [][]byte {
	proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
	for i := range proof {
12  beacon-chain/cache/skip_slot_cache.go  vendored
@@ -120,26 +120,22 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {

// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
	if c.disabled {
		return nil
		return
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	delete(c.inProgress, r)
	return nil
}

// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) {
	if c.disabled {
		return nil
		return
	}

	// Copy state so cached value is not mutated.
	c.cache.Add(r, state.Copy())

	return nil
}
4  beacon-chain/cache/skip_slot_cache_test.go  vendored
@@ -28,8 +28,8 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
	})
	require.NoError(t, err)

	require.NoError(t, c.Put(ctx, r, s))
	require.NoError(t, c.MarkNotInProgress(r))
	c.Put(ctx, r, s)
	c.MarkNotInProgress(r)

	res, err := c.Get(ctx, r)
	require.NoError(t, err)
50  beacon-chain/cache/sync_committee_test.go  vendored
@@ -5,8 +5,6 @@ import (

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
@@ -28,10 +26,10 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
}{
	{
		name: "only current epoch",
		currentSyncCommittee: convertToCommittee([][]byte{
		currentSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
		}),
		nextSyncCommittee: convertToCommittee([][]byte{}),
		nextSyncCommittee: util.ConvertToCommittee([][]byte{}),
		currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
			1: {0},
			2: {1, 3, 4},
@@ -45,8 +43,8 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
	},
	{
		name: "only next epoch",
		currentSyncCommittee: convertToCommittee([][]byte{}),
		nextSyncCommittee: convertToCommittee([][]byte{
		currentSyncCommittee: util.ConvertToCommittee([][]byte{}),
		nextSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
		}),
		currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
@@ -62,14 +60,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
	},
	{
		name: "some current epoch and some next epoch",
		currentSyncCommittee: convertToCommittee([][]byte{
		currentSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[1],
			pubKeys[2],
			pubKeys[3],
			pubKeys[2],
			pubKeys[2],
		}),
		nextSyncCommittee: convertToCommittee([][]byte{
		nextSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[7],
			pubKeys[6],
			pubKeys[5],
@@ -90,14 +88,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
	},
	{
		name: "some current epoch and some next epoch duplicated across",
		currentSyncCommittee: convertToCommittee([][]byte{
		currentSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[1],
			pubKeys[2],
			pubKeys[3],
			pubKeys[2],
			pubKeys[2],
		}),
		nextSyncCommittee: convertToCommittee([][]byte{
		nextSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[2],
			pubKeys[1],
			pubKeys[3],
@@ -117,13 +115,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
	},
	{
		name: "all duplicated",
		currentSyncCommittee: convertToCommittee([][]byte{
		currentSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[100],
			pubKeys[100],
			pubKeys[100],
			pubKeys[100],
		}),
		nextSyncCommittee: convertToCommittee([][]byte{
		nextSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[100],
			pubKeys[100],
			pubKeys[100],
@@ -138,13 +136,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
	},
	{
		name: "unknown keys",
		currentSyncCommittee: convertToCommittee([][]byte{
		currentSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[100],
			pubKeys[100],
			pubKeys[100],
			pubKeys[100],
		}),
		nextSyncCommittee: convertToCommittee([][]byte{
		nextSyncCommittee: util.ConvertToCommittee([][]byte{
			pubKeys[100],
			pubKeys[100],
			pubKeys[100],
@@ -189,13 +187,13 @@ func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
	c := cache.NewSyncCommittee()
	s, _ := util.DeterministicGenesisStateAltair(t, 64)
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{1}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{2}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{3}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{4}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))

	_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
@@ -204,19 +202,3 @@ func TestSyncCommitteeCache_CanRotate(t *testing.T) {
	_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
	require.NoError(t, err)
}

func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
	var pubKeys [][]byte
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
		if i < uint64(len(inputKeys)) {
			pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
		} else {
			pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
		}
	}

	return &ethpb.SyncCommittee{
		Pubkeys:         pubKeys,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
}
@@ -183,12 +183,14 @@ func ProcessEpochParticipation(
}
if has && vals[i].IsActivePrevEpoch {
	vals[i].IsPrevEpochAttester = true
	vals[i].IsPrevEpochSourceAttester = true
}
has, err = HasValidatorFlag(b, targetIdx)
if err != nil {
	return nil, nil, err
}
if has && vals[i].IsActivePrevEpoch {
	vals[i].IsPrevEpochAttester = true
	vals[i].IsPrevEpochTargetAttester = true
}
has, err = HasValidatorFlag(b, headIdx)
@@ -199,7 +201,7 @@ func ProcessEpochParticipation(
	vals[i].IsPrevEpochHeadAttester = true
}
}
bal = precompute.UpdateBalance(vals, bal)
bal = precompute.UpdateBalance(vals, bal, beaconState.Version())
return vals, bal, nil
}

@@ -298,7 +300,7 @@ func attestationDelta(
headWeight := cfg.TimelyHeadWeight
reward, penalty = uint64(0), uint64(0)
// Process source reward / penalty
if val.IsPrevEpochAttester && !val.IsSlashed {
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
	if !inactivityLeak {
		n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
		reward += n / (activeIncrement * weightDenominator)
@@ -108,6 +108,7 @@ func TestProcessEpochParticipation(t *testing.T) {
	CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
	IsCurrentEpochAttester:       true,
	IsPrevEpochAttester:          true,
	IsPrevEpochSourceAttester:    true,
}, validators[1])
require.DeepEqual(t, &precompute.Validator{
	IsActiveCurrentEpoch: true,
@@ -116,6 +117,7 @@ func TestProcessEpochParticipation(t *testing.T) {
	CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
	IsCurrentEpochAttester:       true,
	IsPrevEpochAttester:          true,
	IsPrevEpochSourceAttester:    true,
	IsCurrentEpochTargetAttester: true,
	IsPrevEpochTargetAttester:    true,
}, validators[2])
@@ -126,6 +128,7 @@ func TestProcessEpochParticipation(t *testing.T) {
	CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
	IsCurrentEpochAttester:       true,
	IsPrevEpochAttester:          true,
	IsPrevEpochSourceAttester:    true,
	IsCurrentEpochTargetAttester: true,
	IsPrevEpochTargetAttester:    true,
	IsPrevEpochHeadAttester:      true,
@@ -180,6 +183,7 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
	IsActiveCurrentEpoch:         false,
	IsActivePrevEpoch:            true,
	IsPrevEpochAttester:          true,
	IsPrevEpochSourceAttester:    true,
	IsPrevEpochTargetAttester:    true,
	IsWithdrawableCurrentEpoch:   true,
	CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -191,6 +195,7 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
	CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
	IsCurrentEpochAttester:       true,
	IsPrevEpochAttester:          true,
	IsPrevEpochSourceAttester:    true,
	IsCurrentEpochTargetAttester: true,
	IsPrevEpochTargetAttester:    true,
	IsPrevEpochHeadAttester:      true,
@@ -358,7 +358,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
}

want := "nil or missing indexed attestation data"
_, err := blocks.AttestationSignatureSet(context.Background(), beaconState, atts)
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
assert.ErrorContains(t, want, err)

atts = []*ethpb.Attestation{}
@@ -378,7 +378,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
}

want = "expected non-empty attesting indices"
_, err = blocks.AttestationSignatureSet(context.Background(), beaconState, atts)
_, err = blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
assert.ErrorContains(t, want, err)
}

@@ -502,7 +502,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()

set, err := blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
@@ -566,6 +566,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()

_, err = blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
_, err = blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
require.NoError(t, err)
}
@@ -62,7 +62,7 @@ func ProcessAttesterSlashing(
	if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
		return nil, errors.Wrap(err, "could not verify attester slashing")
	}
	slashableIndices := slashableAttesterIndices(slashing)
	slashableIndices := SlashableAttesterIndices(slashing)
	sort.SliceStable(slashableIndices, func(i, j int) bool {
		return slashableIndices[i] < slashableIndices[j]
	})
@@ -152,7 +152,8 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
	return isDoubleVote || isSurroundVote
}

func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
	if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
		return nil
	}
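Exporting SlashableAttesterIndices makes the intersection logic reusable outside this package. For intuition, the core operation is a set intersection over the two attestations' attesting indices; here is a minimal standalone sketch of that idea, not the exported function's exact body (which is truncated in this hunk):

    package main

    import "fmt"

    // intersect returns the values present in both slices, mirroring the idea
    // behind SlashableAttesterIndices: a validator is slashable only if it
    // signed both conflicting attestations.
    func intersect(a, b []uint64) []uint64 {
    	seen := make(map[uint64]bool, len(a))
    	for _, v := range a {
    		seen[v] = true
    	}
    	var out []uint64
    	for _, v := range b {
    		if seen[v] {
    			out = append(out, v)
    			seen[v] = false // avoid duplicates in the result
    		}
    	}
    	return out
    }

    func main() {
    	fmt.Println(intersect([]uint64{1, 2, 3}, []uint64{2, 3, 4})) // [2 3]
    }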
@@ -47,7 +47,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
	}
}

func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	var ba []byte
	pubkey := [48]byte{}
@@ -85,7 +85,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
	}
}

func TestFuzzareEth1DataEqual_10000(t *testing.T) {
func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	eth1data := &eth.Eth1Data{}
	eth1data2 := &eth.Eth1Data{}
@@ -227,7 +227,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
	}
}

func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	attestationData := &eth.AttestationData{}
	attestationData2 := &eth.AttestationData{}
@@ -239,13 +239,13 @@ func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
	}
}

func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	attesterSlashing := &eth.AttesterSlashing{}

	for i := 0; i < 10000; i++ {
		fuzzer.Fuzz(attesterSlashing)
		slashableAttesterIndices(attesterSlashing)
		SlashableAttesterIndices(attesterSlashing)
	}
}

@@ -397,7 +397,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
	}
}

func TestFuzzVerifyExit_10000(t *testing.T) {
func TestFuzzVerifyExit_10000(_ *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	ve := &eth.SignedVoluntaryExit{}
	rawVal := &ethpb.Validator{}
@@ -321,7 +321,7 @@ func TestBlockSignatureSet_OK(t *testing.T) {
	validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
	err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
	require.NoError(t, err)
	set, err := blocks.BlockSignatureSet(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
	set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
	require.NoError(t, err)

	verified, err := set.Verify()

@@ -82,7 +82,7 @@ func TestRandaoSignatureSet_OK(t *testing.T) {
	},
}

set, err := blocks.RandaoSignatureSet(context.Background(), beaconState, block.Body.RandaoReveal)
set, err := blocks.RandaoSignatureBatch(context.Background(), beaconState, block.Body.RandaoReveal)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
@@ -18,8 +18,8 @@ import (
	"github.com/prysmaticlabs/prysm/time/slots"
)

// retrieves the signature set from the raw data, public key, signature and domain provided.
func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
// retrieves the signature batch from the raw data, public key, signature and domain provided.
func signatureBatch(signedData, pub, signature, domain []byte) (*bls.SignatureBatch, error) {
	publicKey, err := bls.PublicKeyFromBytes(pub)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -32,7 +32,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,
	if err != nil {
		return nil, errors.Wrap(err, "could not hash container")
	}
	return &bls.SignatureSet{
	return &bls.SignatureBatch{
		Signatures: [][]byte{signature},
		PublicKeys: []bls.PublicKey{publicKey},
		Messages:   [][32]byte{root},
@@ -41,7 +41,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,

// verifies the signature from the raw data, public key and domain provided.
func verifySignature(signedData, pub, signature, domain []byte) error {
	set, err := signatureSet(signedData, pub, signature, domain)
	set, err := signatureBatch(signedData, pub, signature, domain)
	if err != nil {
		return err
	}
@@ -116,11 +116,11 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
	return signing.VerifyBlockSigningRoot(proposerPubKey, blk.Signature(), domain, blk.Block().HashTreeRoot)
}

// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
	proposerIndex types.ValidatorIndex,
	sig []byte,
	rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) {
	rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
	currentEpoch := slots.ToEpoch(beaconState.Slot())
	domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
	if err != nil {
@@ -131,21 +131,21 @@ func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
		return nil, err
	}
	proposerPubKey := proposer.PublicKey
	return signing.BlockSignatureSet(proposerPubKey, sig, domain, rootFunc)
	return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}

// RandaoSignatureSet retrieves the relevant randao specific signature set object
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureSet(
func RandaoSignatureBatch(
	ctx context.Context,
	beaconState state.ReadOnlyBeaconState,
	reveal []byte,
) (*bls.SignatureSet, error) {
) (*bls.SignatureBatch, error) {
	buf, proposerPub, domain, err := randaoSigningData(ctx, beaconState)
	if err != nil {
		return nil, err
	}
	set, err := signatureSet(buf, proposerPub, reveal, domain)
	set, err := signatureBatch(buf, proposerPub, reveal, domain)
	if err != nil {
		return nil, err
	}
@@ -171,13 +171,13 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
	return buf, proposerPub[:], domain, nil
}

// Method to break down attestations of the same domain and collect them into a single signature set.
func createAttestationSignatureSet(
// Method to break down attestations of the same domain and collect them into a single signature batch.
func createAttestationSignatureBatch(
	ctx context.Context,
	beaconState state.ReadOnlyBeaconState,
	atts []*ethpb.Attestation,
	domain []byte,
) (*bls.SignatureSet, error) {
) (*bls.SignatureBatch, error) {
	if len(atts) == 0 {
		return nil, nil
	}
@@ -216,16 +216,16 @@ func createAttestationSignatureSet(
	}
	msgs[i] = root
}
return &bls.SignatureSet{
return &bls.SignatureBatch{
	Signatures: sigs,
	PublicKeys: pks,
	Messages:   msgs,
}, nil
}

// AttestationSignatureSet retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collates it into a signature set object.
func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureSet, error) {
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collates it into a signature batch object.
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) {
	if len(atts) == 0 {
		return bls.NewSet(), nil
	}
@@ -247,12 +247,12 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac
	set := bls.NewSet()

	// Check attestations from before the fork.
	if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and that there are valid attestations to create a sig set.
	if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and that there are valid attestations to create a sig batch.
		prevDomain, err := signing.Domain(fork, fork.Epoch-1, dt, gvr)
		if err != nil {
			return nil, err
		}
		aSet, err := createAttestationSignatureSet(ctx, beaconState, preForkAtts, prevDomain)
		aSet, err := createAttestationSignatureBatch(ctx, beaconState, preForkAtts, prevDomain)
		if err != nil {
			return nil, err
		}
@@ -272,7 +272,7 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac
		return nil, err
	}

	aSet, err := createAttestationSignatureSet(ctx, beaconState, postForkAtts, currDomain)
	aSet, err := createAttestationSignatureBatch(ctx, beaconState, postForkAtts, currDomain)
	if err != nil {
		return nil, err
	}
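The SignatureSet-to-SignatureBatch rename is mechanical, but the pattern it names is worth seeing end to end: callers accumulate (signature, pubkey, message) triples from blocks, randao reveals, and attestations, then verify them all in one batched check instead of N individual pairing operations. A hedged sketch of that composition, following the call shapes visible in this diff; the Join method on the batch is an assumption drawn from how these per-object batches are merged elsewhere in the codebase, not something shown in these hunks:

    // Sketch only: gather a per-block batch, merge it into an accumulator,
    // and run a single batched verification at the end.
    func verifyBlockBatch(st state.ReadOnlyBeaconState, signed block.SignedBeaconBlock) error {
    	batch := bls.NewSet()

    	bSet, err := blocks.BlockSignatureBatch(st, signed.Block().ProposerIndex(), signed.Signature(), signed.Block().HashTreeRoot)
    	if err != nil {
    		return err
    	}
    	batch.Join(bSet) // assumed merge API; see note above

    	ok, err := batch.Verify() // one batched check, as in the tests in this diff
    	if err != nil {
    		return err
    	}
    	if !ok {
    		return errors.New("signature batch failed to verify")
    	}
    	return nil
    }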
@@ -24,6 +24,7 @@ go_library(
	"//monitoring/tracing:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//proto/prysm/v1alpha1/attestation:go_default_library",
	"//runtime/version:go_default_library",
	"//time/slots:go_default_library",
	"@com_github_pkg_errors//:go_default_library",
	"@com_github_prysmaticlabs_eth2_types//:go_default_library",
@@ -51,6 +52,7 @@ go_test(
	"//math:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//proto/prysm/v1alpha1/attestation:go_default_library",
	"//runtime/version:go_default_library",
	"//testing/assert:go_default_library",
	"//testing/require:go_default_library",
	"//testing/util:go_default_library",
@@ -13,6 +13,7 @@ import (
	"github.com/prysmaticlabs/prysm/monitoring/tracing"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
	"github.com/prysmaticlabs/prysm/runtime/version"
	"go.opencensus.io/trace"
)

@@ -64,7 +65,7 @@ func ProcessAttestations(
	vp = UpdateValidator(vp, v, indices, a, a.Data.Slot)
}

pBal = UpdateBalance(vp, pBal)
pBal = UpdateBalance(vp, pBal, state.Version())

return vp, pBal, nil
}
@@ -170,7 +171,7 @@ func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *et
}

// UpdateBalance updates the precomputed balance store.
func UpdateBalance(vp []*Validator, bBal *Balance) *Balance {
func UpdateBalance(vp []*Validator, bBal *Balance, stateVersion int) *Balance {
	for _, v := range vp {
		if !v.IsSlashed {
			if v.IsCurrentEpochAttester {
@@ -179,7 +180,10 @@ func UpdateBalance(vp []*Validator, bBal *Balance) *Balance {
			if v.IsCurrentEpochTargetAttester {
				bBal.CurrentEpochTargetAttested += v.CurrentEpochEffectiveBalance
			}
			if v.IsPrevEpochAttester {
			if stateVersion == version.Phase0 && v.IsPrevEpochAttester {
				bBal.PrevEpochAttested += v.CurrentEpochEffectiveBalance
			}
			if stateVersion == version.Altair && v.IsPrevEpochSourceAttester {
				bBal.PrevEpochAttested += v.CurrentEpochEffectiveBalance
			}
			if v.IsPrevEpochTargetAttester {
@@ -10,6 +10,7 @@ import (
	"github.com/prysmaticlabs/prysm/config/params"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
	"github.com/prysmaticlabs/prysm/runtime/version"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
@@ -65,7 +66,7 @@ func TestUpdateBalance(t *testing.T) {
	PrevEpochTargetAttested: 100 * params.BeaconConfig().EffectiveBalanceIncrement,
	PrevEpochHeadAttested:   200 * params.BeaconConfig().EffectiveBalanceIncrement,
}
pBal := precompute.UpdateBalance(vp, &precompute.Balance{})
pBal := precompute.UpdateBalance(vp, &precompute.Balance{}, version.Phase0)
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
}
@@ -20,12 +20,14 @@ type Validator struct {
	IsCurrentEpochTargetAttester bool
	// IsPrevEpochAttester is true if the validator attested in the previous epoch.
	IsPrevEpochAttester bool
	// IsPrevEpochSourceAttester is true if the validator attested to the source in the previous epoch. [Only for Altair]
	IsPrevEpochSourceAttester bool
	// IsPrevEpochTargetAttester is true if the validator attested to the previous epoch target.
	IsPrevEpochTargetAttester bool
	// IsPrevEpochHeadAttester is true if the validator attested to the head.
	IsPrevEpochHeadAttester bool

	// CurrentEpochEffectiveBalance is how much effective balance this validator validator has current epoch.
	// CurrentEpochEffectiveBalance is how much effective balance this validator has in the current epoch.
	CurrentEpochEffectiveBalance uint64
	// InclusionSlot is the slot when the attestation gets included in the chain.
	InclusionSlot types.Slot
@@ -199,7 +199,7 @@ func CommitteeAssignments(
// Each slot in an epoch has a different set of committees. This value is derived from the
// active validator set, which does not change.
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
validatorIndexToCommittee := make(map[types.ValidatorIndex]*CommitteeAssignmentContainer, params.BeaconConfig().SlotsPerEpoch.Mul(numCommitteesPerSlot))
validatorIndexToCommittee := make(map[types.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))

// Compute all committees for all slots.
for i := types.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -59,7 +59,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
case err == nil:
	return bal, nil
case errors.Is(err, cache.ErrNotFound):
	break
	// Do nothing if we receive a not found error.
default:
	// In the event we encounter any other error, we return it.
	return 0, err
@@ -2,9 +2,9 @@ package signing

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/eth2-types"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/crypto/bls"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// Domain returns the domain version for BLS private key to sign and verify.

@@ -3,9 +3,9 @@ package signing
import (
	"testing"

	"github.com/prysmaticlabs/eth2-types"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -118,11 +118,11 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur

// VerifyBlockSigningRoot verifies the signing root of a block given its public key, signature and domain.
func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) error {
	set, err := BlockSignatureSet(pub, signature, domain, rootFunc)
	set, err := BlockSignatureBatch(pub, signature, domain, rootFunc)
	if err != nil {
		return err
	}
	// We assume only one signature set is returned here.
	// We assume only one signature batch is returned here.
	sig := set.Signatures[0]
	publicKey := set.PublicKeys[0]
	root := set.Messages[0]
@@ -137,9 +137,9 @@ func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]
	return nil
}

// BlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collates it
// into a signature set object.
func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) {
// BlockSignatureBatch retrieves the relevant signature, message and pubkey data from a block and collates it
// into a signature batch object.
func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
	publicKey, err := bls.PublicKeyFromBytes(pub)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -149,7 +149,7 @@ func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte,
	if err != nil {
		return nil, errors.Wrap(err, "could not compute signing root")
	}
	return &bls.SignatureSet{
	return &bls.SignatureBatch{
		Signatures: [][]byte{signature},
		PublicKeys: []bls.PublicKey{publicKey},
		Messages:   [][32]byte{root},
@@ -60,6 +60,16 @@ func CanUpgradeToAltair(slot types.Slot) bool {
	return epochStart && altairEpoch
}

// CanUpgradeToMerge returns true if the input `slot` can upgrade to the Merge fork.
//
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH
func CanUpgradeToMerge(slot types.Slot) bool {
	epochStart := slots.IsEpochStart(slot)
	mergeEpoch := slots.ToEpoch(slot) == params.BeaconConfig().MergeForkEpoch
	return epochStart && mergeEpoch
}

// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//
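Worked example: with MergeForkEpoch set to 5 and the mainnet SlotsPerEpoch of 32, both spec conditions hold only when slot % 32 == 0 and slot / 32 == 5, that is, at exactly slot 160; the test below pins this behavior with those same parameters.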
@@ -114,6 +114,40 @@ func TestCanUpgradeToAltair(t *testing.T) {
	}
}

func TestCanUpgradeToMerge(t *testing.T) {
	bc := params.BeaconConfig()
	bc.MergeForkEpoch = 5
	params.OverrideBeaconConfig(bc)
	tests := []struct {
		name string
		slot types.Slot
		want bool
	}{
		{
			name: "not epoch start",
			slot: 1,
			want: false,
		},
		{
			name: "not merge epoch",
			slot: params.BeaconConfig().SlotsPerEpoch,
			want: false,
		},
		{
			name: "merge epoch",
			slot: types.Slot(params.BeaconConfig().MergeForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := CanUpgradeToMerge(tt.slot); got != tt.want {
				t.Errorf("CanUpgradeToMerge() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
	tests := []struct {
		slot types.Slot
@@ -35,6 +35,7 @@ go_library(
	"//beacon-chain/core/transition/interop:go_default_library",
	"//beacon-chain/core/validators:go_default_library",
	"//beacon-chain/state:go_default_library",
	"//beacon-chain/state/stateutil:go_default_library",
	"//beacon-chain/state/v1:go_default_library",
	"//config/features:go_default_library",
	"//config/params:go_default_library",

@@ -7,6 +7,7 @@ import (
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	"github.com/prysmaticlabs/prysm/config/params"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -105,7 +106,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,

	slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)

	genesisValidatorsRoot, err := v1.ValidatorRegistryRoot(preState.Validators())
	genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
	if err != nil {
		return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
	}
@@ -214,10 +214,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
	return nil, err
}
defer func() {
	if err := SkipSlotCache.MarkNotInProgress(key); err != nil {
		tracing.AnnotateError(span, err)
		log.WithError(err).Error("Failed to mark skip slot no longer in progress")
	}
	SkipSlotCache.MarkNotInProgress(key)
}()

for state.Slot() < slot {
@@ -225,7 +222,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
	tracing.AnnotateError(span, ctx.Err())
	// Cache last best value.
	if highestSlot < state.Slot() {
		if err := SkipSlotCache.Put(ctx, key, state); err != nil {
			log.WithError(err).Error("Failed to put skip slot cache value")
		}
		SkipSlotCache.Put(ctx, key, state)
	}
@@ -269,10 +266,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
}

if highestSlot < state.Slot() {
	if err := SkipSlotCache.Put(ctx, key, state); err != nil {
		log.WithError(err).Error("Failed to put skip slot cache value")
		tracing.AnnotateError(span, err)
	}
	SkipSlotCache.Put(ctx, key, state)
}

return state, nil
@@ -45,7 +45,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
	ctx context.Context,
	state state.BeaconState,
	signed block.SignedBeaconBlock,
) (*bls.SignatureSet, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
	if ctx.Err() != nil {
		return nil, nil, ctx.Err()
	}
@@ -182,7 +182,7 @@ func ProcessBlockNoVerifyAnySig(
	ctx context.Context,
	state state.BeaconState,
	signed block.SignedBeaconBlock,
) (*bls.SignatureSet, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
	defer span.End()
	if err := helpers.BeaconBlockIsNil(signed); err != nil {
@@ -209,17 +209,17 @@ func ProcessBlockNoVerifyAnySig(
		}
	}

	bSet, err := b.BlockSignatureSet(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
	bSet, err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
	}
	rSet, err := b.RandaoSignatureSet(ctx, state, signed.Block().Body().RandaoReveal())
	rSet, err := b.RandaoSignatureBatch(ctx, state, signed.Block().Body().RandaoReveal())
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
	}
	aSet, err := b.AttestationSignatureSet(ctx, state, signed.Block().Body().Attestations())
	aSet, err := b.AttestationSignatureBatch(ctx, state, signed.Block().Body().Attestations())
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
	}
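
With the rename from SignatureSet to SignatureBatch, a caller collects the block, randao, and attestation batches from the state transition and verifies them in one pass. A minimal sketch, assuming the batch type exposes a Verify() (bool, error) helper as in Prysm's bls package (that signature, and the st/signedBlk variables, are assumptions here):

	// Hypothetical caller: run the transition without signature checks,
	// then batch-verify everything at once.
	batch, postState, err := ExecuteStateTransitionNoVerifyAnySig(ctx, st, signedBlk)
	if err != nil {
		return err
	}
	valid, err := batch.Verify() // assumed batch-verification helper
	if err != nil {
		return errors.Wrap(err, "could not batch verify block signatures")
	}
	if !valid {
		return errors.New("block signature batch did not verify")
	}
	_ = postState // safe to use once signatures are known-good
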
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "alias.go",
        "db.go",
        "errors.go",
        "log.go",
        "restore.go",
    ],
5 beacon-chain/db/errors.go Normal file
@@ -0,0 +1,5 @@
package db

import "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"

var ErrNotFound = kv.ErrNotFound
@@ -39,14 +39,6 @@ type ReadOnlyDatabase interface {
	StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
	HasStateSummary(ctx context.Context, blockRoot [32]byte) bool
	HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]state.ReadOnlyBeaconState, error)
	// Slashing operations.
	ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.ProposerSlashing, error)
	AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.AttesterSlashing, error)
	HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool
	HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool
	// Block operations.
	VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*eth.VoluntaryExit, error)
	HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool
	// Checkpoint operations.
	JustifiedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
	FinalizedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
@@ -58,6 +50,9 @@ type ReadOnlyDatabase interface {
	DepositContractAddress(ctx context.Context) ([]byte, error)
	// Powchain operations.
	PowchainData(ctx context.Context) (*v2.ETH1ChainData, error)

	// Origin checkpoint sync support
	OriginCheckpointRoot(ctx context.Context) ([32]byte, error)
}

// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -75,11 +70,6 @@ type NoHeadAccessDatabase interface {
	DeleteStates(ctx context.Context, blockRoots [][32]byte) error
	SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
	SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
	// Slashing operations.
	SaveProposerSlashing(ctx context.Context, slashing *eth.ProposerSlashing) error
	SaveAttesterSlashing(ctx context.Context, slashing *eth.AttesterSlashing) error
	// Block operations.
	SaveVoluntaryExit(ctx context.Context, exit *eth.VoluntaryExit) error
	// Checkpoint operations.
	SaveJustifiedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
	SaveFinalizedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
@@ -105,6 +95,9 @@ type HeadAccessDatabase interface {
	LoadGenesis(ctx context.Context, r io.Reader) error
	SaveGenesisData(ctx context.Context, state state.BeaconState) error
	EnsureEmbeddedGenesis(ctx context.Context) error

	// db support for checkpoint sync
	SaveCheckpointInitialState(ctx context.Context, state io.Reader, block io.Reader) error
}

// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
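
A consumer-side sketch of the new checkpoint-sync surface (the file names, the beaconDB variable, and the surrounding wiring are assumptions for illustration; only SaveCheckpointInitialState comes from this interface):

	// Hypothetical wiring: stream SSZ-serialized checkpoint state and
	// block files into the database at startup.
	stateFile, err := os.Open("state.ssz") // illustrative path
	if err != nil {
		return err
	}
	defer stateFile.Close()
	blockFile, err := os.Open("block.ssz") // illustrative path
	if err != nil {
		return err
	}
	defer blockFile.Close()
	if err := beaconDB.SaveCheckpointInitialState(ctx, stateFile, blockFile); err != nil {
		return errors.Wrap(err, "could not save checkpoint initial state")
	}
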
@@ -10,6 +10,7 @@ go_library(
        "checkpoint.go",
        "deposit_contract.go",
        "encoding.go",
        "error.go",
        "finalized_block_roots.go",
        "genesis.go",
        "kv.go",
@@ -18,14 +19,13 @@ go_library(
        "migration_archived_index.go",
        "migration_block_slot_index.go",
        "migration_state_validators.go",
        "operations.go",
        "powchain.go",
        "schema.go",
        "slashings.go",
        "state.go",
        "state_summary.go",
        "state_summary_cache.go",
        "utils.go",
        "wss.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
    visibility = [
@@ -81,6 +81,7 @@ go_test(
        "checkpoint_test.go",
        "deposit_contract_test.go",
        "encoding_test.go",
        "error_test.go",
        "finalized_block_roots_test.go",
        "genesis_test.go",
        "init_test.go",
@@ -88,9 +89,7 @@ go_test(
        "migration_archived_index_test.go",
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
        "operations_test.go",
        "powchain_test.go",
        "slashings_test.go",
        "state_summary_test.go",
        "state_test.go",
        "utils_test.go",
@@ -46,6 +46,24 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeac
	return blk, err
}

func (s *Store) OriginCheckpointRoot(ctx context.Context) ([32]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointRoot")
	defer span.End()

	var root [32]byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blocksBucket)
		rootSlice := bkt.Get(originCheckpointBlockKey)
		if rootSlice == nil {
			return ErrNotFoundOriginCheckpoint
		}
		copy(root[:], rootSlice)
		return nil
	})

	return root, err
}

// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -150,11 +168,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []bloc
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blocksBucket)

		keys, err := blockRootsBySlot(ctx, tx, slot)
		if err != nil {
			return err
		}

		keys := blockRootsBySlot(ctx, tx, slot)
		for i := 0; i < len(keys); i++ {
			encoded := bkt.Get(keys[i])
			blk, err := unmarshalBlock(ctx, encoded)
@@ -174,11 +188,7 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
	defer span.End()
	blockRoots := make([][32]byte, 0)
	err := s.db.View(func(tx *bolt.Tx) error {
		keys, err := blockRootsBySlot(ctx, tx, slot)
		if err != nil {
			return err
		}

		keys := blockRootsBySlot(ctx, tx, slot)
		for i := 0; i < len(keys); i++ {
			blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
		}
@@ -344,6 +354,17 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
	})
}

// SaveCheckpointInitialBlockRoot saves the latest block header from the weak subjectivity
// initial sync state.
func (s *Store) SaveCheckpointInitialBlockRoot(ctx context.Context, blockRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveCheckpointInitialBlockRoot")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(blocksBucket)
		return bucket.Put(originCheckpointBlockKey, blockRoot[:])
	})
}

// HighestSlotBlocksBelow returns the block with the highest slot below the input slot from the db.
func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]block.SignedBeaconBlock, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
@@ -519,7 +540,7 @@ func blockRootsBySlotRange(
}

// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]byte, error) {
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
	defer span.End()

@@ -533,7 +554,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]by
			roots = append(roots, v[i:i+32])
		}
	}
	return roots, nil
	return roots
}

// createBlockIndicesFromBlock takes in a beacon block and returns
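
A short sketch of reading the stored origin root back and resolving it to its block (the caller-side error handling is an assumption; ErrNotFoundOriginCheckpoint is defined in the new error.go below):

	// Hypothetical lookup of the checkpoint-sync origin block.
	root, err := store.OriginCheckpointRoot(ctx)
	if errors.Is(err, ErrNotFoundOriginCheckpoint) {
		return nil // node was not started from a checkpoint; nothing to resolve
	}
	if err != nil {
		return err
	}
	blk, err := store.Block(ctx, root)
	if err != nil {
		return err
	}
	_ = blk // e.g. use blk.Block().Slot() as the sync starting point
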
31 beacon-chain/db/kv/error.go Normal file
@@ -0,0 +1,31 @@
package kv

import "errors"

var ErrNotFound = errors.New("not found in db")
var ErrNotFoundOriginCheckpoint = WrapDBError(ErrNotFound, "OriginCheckpointRoot")
var ErrNotFoundFinalizedCheckpoint = WrapDBError(ErrNotFound, "FinalizedCheckpoint")

func WrapDBError(e error, outer string) error {
	return DBError{
		Wraps: e,
		Outer: errors.New(outer),
	}
}

type DBError struct {
	Wraps error
	Outer error
}

func (e DBError) Error() string {
	es := e.Outer.Error()
	if e.Wraps != nil {
		es += ": " + e.Wraps.Error()
	}
	return es
}

func (e DBError) Unwrap() error {
	return e.Wraps
}
24 beacon-chain/db/kv/error_test.go Normal file
@@ -0,0 +1,24 @@
package kv

import (
	"errors"
	"testing"
)

func TestWrappedSentinelError(t *testing.T) {
	e := ErrNotFoundOriginCheckpoint
	if !errors.Is(e, ErrNotFoundOriginCheckpoint) {
		t.Error("expected that a copy of ErrNotFoundOriginCheckpoint would have an is-a relationship")
	}

	outer := errors.New("wrapped error")
	e2 := DBError{Wraps: ErrNotFoundOriginCheckpoint, Outer: outer}
	if !errors.Is(e2, ErrNotFoundOriginCheckpoint) {
		t.Error("expected that errors.Is would know DBError wraps ErrNotFoundOriginCheckpoint")
	}

	// test that the innermost not found error is detected
	if !errors.Is(e2, ErrNotFound) {
		t.Error("expected that errors.Is would know ErrNotFoundOriginCheckpoint wraps ErrNotFound")
	}
}
@@ -1,78 +0,0 @@
package kv

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// VoluntaryExit retrieval by signing root.
func (s *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.VoluntaryExit, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.VoluntaryExit")
	defer span.End()
	enc, err := s.voluntaryExitBytes(ctx, exitRoot)
	if err != nil {
		return nil, err
	}
	if len(enc) == 0 {
		return nil, nil
	}
	exit := &ethpb.VoluntaryExit{}
	if err := decode(ctx, enc, exit); err != nil {
		return nil, err
	}
	return exit, nil
}

// HasVoluntaryExit verifies if a voluntary exit is stored in the db by its signing root.
func (s *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasVoluntaryExit")
	defer span.End()
	enc, err := s.voluntaryExitBytes(ctx, exitRoot)
	if err != nil {
		panic(err)
	}
	return len(enc) > 0
}

// SaveVoluntaryExit to the db by its signing root.
func (s *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveVoluntaryExit")
	defer span.End()
	exitRoot, err := exit.HashTreeRoot()
	if err != nil {
		return err
	}
	enc, err := encode(ctx, exit)
	if err != nil {
		return err
	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(voluntaryExitsBucket)
		return bucket.Put(exitRoot[:], enc)
	})
}

func (s *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.voluntaryExitBytes")
	defer span.End()
	var dst []byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(voluntaryExitsBucket)
		dst = bkt.Get(exitRoot[:])
		return nil
	})
	return dst, err
}

// deleteVoluntaryExit clears a voluntary exit from the db by its signing root.
func (s *Store) deleteVoluntaryExit(ctx context.Context, exitRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteVoluntaryExit")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(voluntaryExitsBucket)
		return bucket.Delete(exitRoot[:])
	})
}
@@ -1,31 +0,0 @@
package kv

import (
	"context"
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"google.golang.org/protobuf/proto"
)

func TestStore_VoluntaryExits_CRUD(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	exit := &ethpb.VoluntaryExit{
		Epoch: 5,
	}
	exitRoot, err := exit.HashTreeRoot()
	require.NoError(t, err)
	retrieved, err := db.VoluntaryExit(ctx, exitRoot)
	require.NoError(t, err)
	assert.Equal(t, (*ethpb.VoluntaryExit)(nil), retrieved, "Expected nil voluntary exit")
	require.NoError(t, db.SaveVoluntaryExit(ctx, exit))
	assert.Equal(t, true, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to exist in the db")
	retrieved, err = db.VoluntaryExit(ctx, exitRoot)
	require.NoError(t, err)
	assert.Equal(t, true, proto.Equal(exit, retrieved), "Wanted %v, received %v", exit, retrieved)
	require.NoError(t, db.deleteVoluntaryExit(ctx, exitRoot))
	assert.Equal(t, false, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to have been deleted from the db")
}
@@ -46,6 +46,8 @@ var (
	// Altair key used to identify object is altair compatible.
	// Objects that are only compatible with altair should be prefixed with such key.
	altairKey = []byte("altair")
	// block root included in the beacon state used by weak subjectivity initial sync
	originCheckpointBlockKey = []byte("checkpoint-initial-block-root")

	// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
	lastArchivedIndexKey = []byte("last-archived")
@@ -1,147 +0,0 @@
package kv

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// ProposerSlashing retrieval by slashing root.
func (s *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.ProposerSlashing, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ProposerSlashing")
	defer span.End()
	enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
	if err != nil {
		return nil, err
	}
	if len(enc) == 0 {
		return nil, nil
	}
	proposerSlashing := &ethpb.ProposerSlashing{}
	if err := decode(ctx, enc, proposerSlashing); err != nil {
		return nil, err
	}
	return proposerSlashing, nil
}

// HasProposerSlashing verifies if a slashing is stored in the db.
func (s *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasProposerSlashing")
	defer span.End()
	enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
	if err != nil {
		panic(err)
	}
	return len(enc) > 0
}

// SaveProposerSlashing to the db by its hash tree root.
func (s *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveProposerSlashing")
	defer span.End()
	slashingRoot, err := slashing.HashTreeRoot()
	if err != nil {
		return err
	}
	enc, err := encode(ctx, slashing)
	if err != nil {
		return err
	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(proposerSlashingsBucket)
		return bucket.Put(slashingRoot[:], enc)
	})
}

func (s *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.proposerSlashingBytes")
	defer span.End()
	var dst []byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(proposerSlashingsBucket)
		dst = bkt.Get(slashingRoot[:])
		return nil
	})
	return dst, err
}

// deleteProposerSlashing clears a proposer slashing from the db by its hash tree root.
func (s *Store) deleteProposerSlashing(ctx context.Context, slashingRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteProposerSlashing")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(proposerSlashingsBucket)
		return bucket.Delete(slashingRoot[:])
	})
}

// AttesterSlashing retrieval by hash tree root.
func (s *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.AttesterSlashing, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.AttesterSlashing")
	defer span.End()
	enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
	if err != nil {
		return nil, err
	}
	if len(enc) == 0 {
		return nil, nil
	}
	attSlashing := &ethpb.AttesterSlashing{}
	if err := decode(ctx, enc, attSlashing); err != nil {
		return nil, err
	}
	return attSlashing, nil
}

// HasAttesterSlashing verifies if a slashing is stored in the db.
func (s *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttesterSlashing")
	defer span.End()
	enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
	if err != nil {
		panic(err)
	}
	return len(enc) > 0
}

// SaveAttesterSlashing to the db by its hash tree root.
func (s *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttesterSlashing")
	defer span.End()
	slashingRoot, err := slashing.HashTreeRoot()
	if err != nil {
		return err
	}
	enc, err := encode(ctx, slashing)
	if err != nil {
		return err
	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(attesterSlashingsBucket)
		return bucket.Put(slashingRoot[:], enc)
	})
}

func (s *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.attesterSlashingBytes")
	defer span.End()
	var dst []byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(attesterSlashingsBucket)
		dst = bkt.Get(slashingRoot[:])
		return nil
	})
	return dst, err
}

// deleteAttesterSlashing clears an attester slashing from the db by its hash tree root.
func (s *Store) deleteAttesterSlashing(ctx context.Context, slashingRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteAttesterSlashing")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(attesterSlashingsBucket)
		return bucket.Delete(slashingRoot[:])
	})
}
@@ -1,67 +0,0 @@
package kv

import (
	"context"
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
	"google.golang.org/protobuf/proto"
)

func TestStore_ProposerSlashing_CRUD(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	prop := &ethpb.ProposerSlashing{
		Header_1: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				ProposerIndex: 5,
			},
		}),
		Header_2: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				ProposerIndex: 5,
			},
		}),
	}
	slashingRoot, err := prop.HashTreeRoot()
	require.NoError(t, err)
	retrieved, err := db.ProposerSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, (*ethpb.ProposerSlashing)(nil), retrieved, "Expected nil proposer slashing")
	require.NoError(t, db.SaveProposerSlashing(ctx, prop))
	assert.Equal(t, true, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to exist in the db")
	retrieved, err = db.ProposerSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, true, proto.Equal(prop, retrieved), "Wanted %v, received %v", prop, retrieved)
	require.NoError(t, db.deleteProposerSlashing(ctx, slashingRoot))
	assert.Equal(t, false, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to have been deleted from the db")
}

func TestStore_AttesterSlashing_CRUD(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	att := &ethpb.AttesterSlashing{
		Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
			Data: &ethpb.AttestationData{
				Slot: 5,
			}}),
		Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
			Data: &ethpb.AttestationData{
				Slot: 7,
			}})}
	slashingRoot, err := att.HashTreeRoot()
	require.NoError(t, err)
	retrieved, err := db.AttesterSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, (*ethpb.AttesterSlashing)(nil), retrieved, "Expected nil attester slashing")
	require.NoError(t, db.SaveAttesterSlashing(ctx, att))
	assert.Equal(t, true, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to exist in the db")
	retrieved, err = db.AttesterSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, true, proto.Equal(att, retrieved), "Wanted %v, received %v", att, retrieved)
	require.NoError(t, db.deleteAttesterSlashing(ctx, slashingRoot))
	assert.Equal(t, false, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to have been deleted from the db")
}
85 beacon-chain/db/kv/wss.go Normal file
@@ -0,0 +1,85 @@
package kv

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	statev2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
	"github.com/prysmaticlabs/prysm/config/params"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
)

const SLOTS_PER_EPOCH = 32

// SaveStateToHead sets the current head state.
func (s *Store) SaveStateToHead(ctx context.Context, bs state.BeaconState) error {
	return nil
}

// SaveCheckpointInitialState loads an SSZ-serialized BeaconState from an io.Reader
// (ex: an open file) and sets the given state to the head of the chain.
func (s *Store) SaveCheckpointInitialState(ctx context.Context, stateReader io.Reader, blockReader io.Reader) error {
	// save block to database
	blk := &ethpb.SignedBeaconBlockAltair{}
	bb, err := ioutil.ReadAll(blockReader)
	if err != nil {
		return err
	}
	if err := blk.UnmarshalSSZ(bb); err != nil {
		return errors.Wrap(err, "could not unmarshal checkpoint block")
	}
	wblk, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
	if err != nil {
		return errors.Wrap(err, "could not wrap checkpoint block")
	}
	if err := s.SaveBlock(ctx, wblk); err != nil {
		return errors.Wrap(err, "could not save checkpoint block")
	}
	blockRoot, err := blk.Block.HashTreeRoot()
	if err != nil {
		return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
	}

	bs, err := statev2.InitializeFromSSZReader(stateReader)
	if err != nil {
		return errors.Wrap(err, "could not initialize checkpoint state from reader")
	}

	if err = s.SaveState(ctx, bs, blockRoot); err != nil {
		return errors.Wrap(err, "could not save state")
	}
	if err = s.SaveStateSummary(ctx, &ethpb.StateSummary{
		Slot: bs.Slot(),
		Root: blockRoot[:],
	}); err != nil {
		return err
	}
	if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
		return errors.Wrap(err, "could not save head block root")
	}
	if err = s.SaveCheckpointInitialBlockRoot(ctx, blockRoot); err != nil {
		return err
	}

	slotEpoch, err := blk.Block.Slot.SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
	if err != nil {
		return err
	}
	chkpt := &ethpb.Checkpoint{
		Epoch: types.Epoch(slotEpoch),
		Root:  blockRoot[:],
	}
	if err = s.SaveJustifiedCheckpoint(ctx, chkpt); err != nil {
		return errors.Wrap(err, "could not mark checkpoint sync block as justified")
	}
	if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
		return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
	}

	return nil
}
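
The checkpoint epoch above is plain integer division of the block slot by SlotsPerEpoch; SafeDivSlot only errors on a zero divisor. A worked sketch (values illustrative):

	// With 32-slot epochs, slot 160 falls in epoch 5.
	slot := types.Slot(160)
	slotEpoch, err := slot.SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
	if err != nil {
		return err // only reachable if SlotsPerEpoch were zero
	}
	epoch := types.Epoch(slotEpoch) // epoch == 5
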
@@ -14,7 +14,7 @@ import (
// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
	ctx context.Context, maxEpoch types.Epoch,
	_ context.Context, maxEpoch types.Epoch,
) (numPruned uint, err error) {
	// We can prune everything less than the current epoch - history length.
	encodedEndPruneEpoch := fssz.MarshalUint64([]byte{}, uint64(maxEpoch))
@@ -85,7 +85,7 @@ func (s *Store) PruneAttestationsAtEpoch(
// PruneProposalsAtEpoch deletes all proposals from the slasher DB with epoch
// less than or equal to the specified epoch.
func (s *Store) PruneProposalsAtEpoch(
	ctx context.Context, maxEpoch types.Epoch,
	_ context.Context, maxEpoch types.Epoch,
) (numPruned uint, err error) {
	var endPruneSlot types.Slot
	endPruneSlot, err = slots.EpochEnd(maxEpoch)

@@ -394,7 +394,7 @@ func (s *Store) SaveBlockProposals(

// HighestAttestations retrieves the last attestation data from the database for all indices.
func (s *Store) HighestAttestations(
	ctx context.Context,
	_ context.Context,
	indices []types.ValidatorIndex,
) ([]*slashpb.HighestAttestation, error) {
	if len(indices) == 0 {
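
slots.EpochEnd above maps the cutoff epoch to its final slot, i.e. (epoch+1)*SlotsPerEpoch-1. A worked sketch (assuming 32-slot epochs):

	// Epoch 4 spans slots 128..159, so its end slot is 159.
	endPruneSlot, err := slots.EpochEnd(types.Epoch(4))
	if err != nil {
		return err
	}
	_ = endPruneSlot // 159
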
70 beacon-chain/monitor/BUILD.bazel Normal file
@@ -0,0 +1,70 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "metrics.go",
        "process_attestation.go",
        "process_block.go",
        "process_exit.go",
        "process_sync_committee.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//proto/prysm/v1alpha1/block:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "process_attestation_test.go",
        "process_block_test.go",
        "process_exit_test.go",
        "process_sync_committee_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/wrapper:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
7 beacon-chain/monitor/doc.go Normal file
@@ -0,0 +1,7 @@
/*
Package monitor defines a runtime service which receives
notifications triggered by events related to the performance of tracked
validating keys. It logs those events and emits metrics so that users can
keep finely detailed performance measures for those keys.
*/
package monitor
93 beacon-chain/monitor/metrics.go Normal file
@@ -0,0 +1,93 @@
package monitor

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/sirupsen/logrus"
)

var (
	log = logrus.WithField("prefix", "monitor")
	// TODO: The Prometheus gauge vectors and counters in this package deprecate the
	// corresponding gauge vectors and counters in the validator client.

	// inclusionSlotGauge used to track attestation inclusion slots
	inclusionSlotGauge = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "monitor",
			Name:      "inclusion_slot",
			Help:      "Attestations inclusion slot",
		},
		[]string{
			"validator_index",
		},
	)
	// timelyHeadCounter used to track attestation timely head flags
	timelyHeadCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "monitor",
			Name:      "timely_head",
			Help:      "Attestation timely Head flag",
		},
		[]string{
			"validator_index",
		},
	)
	// timelyTargetCounter used to track attestation timely target flags
	timelyTargetCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "monitor",
			Name:      "timely_target",
			Help:      "Attestation timely Target flag",
		},
		[]string{
			"validator_index",
		},
	)
	// timelySourceCounter used to track attestation timely source flags
	timelySourceCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "monitor",
			Name:      "timely_source",
			Help:      "Attestation timely Source flag",
		},
		[]string{
			"validator_index",
		},
	)

	// proposedSlotsCounter used to track proposed blocks
	proposedSlotsCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "monitor",
			Name:      "proposed_slots_total",
			Help:      "Number of proposed blocks included",
		},
		[]string{
			"validator_index",
		},
	)
	// aggregationCounter used to track aggregations
	aggregationCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "monitor",
			Name:      "aggregations",
			Help:      "Number of aggregation duties performed",
		},
		[]string{
			"validator_index",
		},
	)
	// syncCommitteeContributionCounter used to track sync committee
	// contributions
	syncCommitteeContributionCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "monitor",
			Name:      "sync_committee_contributions_total",
			Help:      "Number of Sync committee contributions performed",
		},
		[]string{
			"validator_index",
		},
	)
)
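
Every vector above is keyed by a single validator_index label, so call sites format the index into the label value before updating; a minimal sketch of the pattern used later in this package:

	// Hypothetical updates for validator 12.
	idx := types.ValidatorIndex(12)
	proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
	// Gauges are set rather than incremented:
	inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(34))
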
234 beacon-chain/monitor/process_attestation.go Normal file
@@ -0,0 +1,234 @@
package monitor

import (
	"context"
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/runtime/version"
	"github.com/prysmaticlabs/prysm/time/slots"
	"github.com/sirupsen/logrus"
)

// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
// given slot is different from the last attested slot of this validator.
// It assumes that a read lock is held on the monitor service.
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
	if !s.trackedIndex(idx) {
		return false
	}

	if lp, ok := s.latestPerformance[idx]; ok {
		return lp.attestedSlot != slot
	}
	return false
}

// attestingIndices returns the indices of validators that appear in the
// given aggregated attestation.
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
	committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
	if err != nil {
		return nil, err
	}
	return attestation.AttestingIndices(att.AggregationBits, committee)
}
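
attestingIndices maps the set bits of the aggregation bitlist onto committee positions. A self-contained sketch of that mapping (done by hand here for illustration; the committee values are made up, and Prysm's attestation.AttestingIndices helper is the real implementation):

	// bitfield.Bitlist{0b11, 0b1} is an 8-bit list with bits 0 and 1 set,
	// so it selects the first two committee members.
	bits := bitfield.Bitlist{0b11, 0b1}
	committee := []types.ValidatorIndex{12, 2, 7, 9, 3, 5, 1, 0}
	var indices []uint64
	for i := uint64(0); i < bits.Len(); i++ {
		if bits.BitAt(i) {
			indices = append(indices, uint64(committee[i]))
		}
	}
	// indices == [12 2], matching the {0xc, 0x2} expectation in the tests below.
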

// logMessageTimelyFlagsForIndex returns the log message with the basic
// performance indicators for the attestation (head, source, target).
func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
	return logrus.Fields{
		"ValidatorIndex": idx,
		"Slot":           data.Slot,
		"Source":         fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
		"Target":         fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
		"Head":           fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
	}
}

// processAttestations logs the event that one of our tracked validators'
// attestations was included in a block.
func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) {
	if blk == nil || blk.Body() == nil {
		return
	}
	for _, attestation := range blk.Body().Attestations() {
		s.processIncludedAttestation(ctx, state, attestation)
	}
}
// processIncludedAttestation logs when one of our tracked validators appears in
// the attesting indices of an included attestation that was not already
// accounted for.
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
	attestingIndices, err := attestingIndices(ctx, state, att)
	if err != nil {
		log.WithError(err).Error("Could not get attesting indices")
		return
	}
	s.Lock()
	defer s.Unlock()
	for _, idx := range attestingIndices {
		if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
			logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
			balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx))
			if err != nil {
				log.WithError(err).Error("Could not get balance")
				return
			}

			aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)]
			aggregatedPerf.totalAttestedCount++
			aggregatedPerf.totalRequestedCount++

			latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
			balanceChg := int64(balance - latestPerf.balance)
			latestPerf.balanceChange = balanceChg
			latestPerf.balance = balance
			latestPerf.attestedSlot = att.Data.Slot
			latestPerf.inclusionSlot = state.Slot()
			inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
			aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)

			if state.Version() == version.Altair {
				targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
				sourceIdx := params.BeaconConfig().TimelySourceFlagIndex
				headIdx := params.BeaconConfig().TimelyHeadFlagIndex

				var participation []byte
				if slots.ToEpoch(latestPerf.inclusionSlot) ==
					slots.ToEpoch(latestPerf.attestedSlot) {
					participation, err = state.CurrentEpochParticipation()
					if err != nil {
						log.WithError(err).Error("Could not get current epoch participation")
						return
					}
				} else {
					participation, err = state.PreviousEpochParticipation()
					if err != nil {
						log.WithError(err).Error("Could not get previous epoch participation")
						return
					}
				}
				flags := participation[idx]
				hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx)
				if err != nil {
					log.WithError(err).Error("Could not get timely Source flag")
					return
				}
				latestPerf.timelySource = hasFlag
				hasFlag, err = altair.HasValidatorFlag(flags, headIdx)
				if err != nil {
					log.WithError(err).Error("Could not get timely Head flag")
					return
				}
				latestPerf.timelyHead = hasFlag
				hasFlag, err = altair.HasValidatorFlag(flags, targetIdx)
				if err != nil {
					log.WithError(err).Error("Could not get timely Target flag")
					return
				}
				latestPerf.timelyTarget = hasFlag

				if latestPerf.timelySource {
					timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
					aggregatedPerf.totalCorrectSource++
				}
				if latestPerf.timelyHead {
					timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
					aggregatedPerf.totalCorrectHead++
				}
				if latestPerf.timelyTarget {
					timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
					aggregatedPerf.totalCorrectTarget++
				}
			}
			logFields["CorrectHead"] = latestPerf.timelyHead
			logFields["CorrectSource"] = latestPerf.timelySource
			logFields["CorrectTarget"] = latestPerf.timelyTarget
			logFields["InclusionSlot"] = latestPerf.inclusionSlot
			logFields["NewBalance"] = balance
			logFields["BalanceChange"] = balanceChg

			s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf
			s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf
			log.WithFields(logFields).Info("Attestation included")
		}
	}
}
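
The timely-flag checks above read Altair participation bits; altair.HasValidatorFlag is essentially a masked bit test. An equivalent stand-alone sketch (for illustration only):

	// Test whether participation-flag bit `idx` is set in a flags byte.
	hasFlag := func(flags uint8, idx uint8) bool {
		return flags&(1<<idx) != 0
	}
	// 0b00000111 has the source (0), target (1), and head (2) flags all set.
	_ = hasFlag(0b00000111, 2) // true
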

// processUnaggregatedAttestation logs when the beacon node sees an unaggregated
// attestation from one of our tracked validators.
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
	s.RLock()
	defer s.RUnlock()
	root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
	state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
	if state == nil {
		log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
			"Skipping unaggregated attestation due to state not found in cache")
		return
	}
	attestingIndices, err := attestingIndices(ctx, state, att)
	if err != nil {
		log.WithError(err).Error("Could not get attesting indices")
		return
	}
	for _, idx := range attestingIndices {
		if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
			logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
			log.WithFields(logFields).Info("Processed unaggregated attestation")
		}
	}
}

// processAggregatedAttestation logs when one of our tracked validators acted as
// the aggregator, or when an aggregated attestation includes one of our tracked
// validators.
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
	s.Lock()
	defer s.Unlock()
	if s.trackedIndex(att.AggregatorIndex) {
		log.WithFields(logrus.Fields{
			"AggregatorIndex": att.AggregatorIndex,
			"Slot":            att.Aggregate.Data.Slot,
			"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
				att.Aggregate.Data.BeaconBlockRoot)),
			"SourceRoot:": fmt.Sprintf("%#x", bytesutil.Trunc(
				att.Aggregate.Data.Source.Root)),
			"TargetRoot:": fmt.Sprintf("%#x", bytesutil.Trunc(
				att.Aggregate.Data.Target.Root)),
		}).Info("Processed attestation aggregation")
		aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
		aggregatedPerf.totalAggregations++
		s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
		aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
	}

	var root [32]byte
	copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
	state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
	if state == nil {
		log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
			"Skipping aggregated attestation due to state not found in cache")
		return
	}
	attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
	if err != nil {
		log.WithError(err).Error("Could not get attesting indices")
		return
	}
	for _, idx := range attestingIndices {
		if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
			logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data)
			log.WithFields(logFields).Info("Processed aggregated attestation")
		}
	}
}
248
beacon-chain/monitor/process_attestation_test.go
Normal file
248
beacon-chain/monitor/process_attestation_test.go
Normal file
@@ -0,0 +1,248 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestGetAttestingIndices(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 256)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, beaconState, att)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2})
|
||||
|
||||
}
|
||||
|
||||
func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processIncludedAttestation(context.Background(), state, att)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processUnaggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processUnaggregatedAttestation(context.Background(), att)
|
||||
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
	state, _ := util.DeterministicGenesisStateAltair(t, 256)
	require.NoError(t, state.SetSlot(2))
	header := state.LatestBlockHeader()
	participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	require.NoError(t, state.SetCurrentParticipationBits(participation))

	att := &ethpb.AggregateAttestationAndProof{
		AggregatorIndex: 2,
		Aggregate: &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Slot:            1,
				CommitteeIndex:  0,
				BeaconBlockRoot: header.GetStateRoot(),
				Source: &ethpb.Checkpoint{
					Epoch: 0,
					Root:  bytesutil.PadTo([]byte("hello-world"), 32),
				},
				Target: &ethpb.Checkpoint{
					Epoch: 1,
					Root:  bytesutil.PadTo([]byte("hello-world"), 32),
				},
			},
			AggregationBits: bitfield.Bitlist{0b11, 0b1},
		},
	}
	s.processAggregatedAttestation(ctx, att)
	require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
	require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
	logrus.SetLevel(logrus.InfoLevel)
}

func TestProcessAggregatedAttestationStateCached(t *testing.T) {
	hook := logTest.NewGlobal()
	ctx := context.Background()
	s := setupService(t)
	state, _ := util.DeterministicGenesisStateAltair(t, 256)
	participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	require.NoError(t, state.SetCurrentParticipationBits(participation))

	root := [32]byte{}
	copy(root[:], "hello-world")

	att := &ethpb.AggregateAttestationAndProof{
		AggregatorIndex: 2,
		Aggregate: &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Slot:            1,
				CommitteeIndex:  0,
				BeaconBlockRoot: root[:],
				Source: &ethpb.Checkpoint{
					Epoch: 0,
					Root:  root[:],
				},
				Target: &ethpb.Checkpoint{
					Epoch: 1,
					Root:  root[:],
				},
			},
			AggregationBits: bitfield.Bitlist{0b10, 0b1},
		},
	}

	require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
	s.processAggregatedAttestation(ctx, att)
	require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
	require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
	require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
}

func TestProcessAttestations(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)
	ctx := context.Background()
	state, _ := util.DeterministicGenesisStateAltair(t, 256)
	require.NoError(t, state.SetSlot(2))
	require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))

	att := &ethpb.Attestation{
		Data: &ethpb.AttestationData{
			Slot:            1,
			CommitteeIndex:  0,
			BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
			Source: &ethpb.Checkpoint{
				Epoch: 0,
				Root:  bytesutil.PadTo([]byte("hello-world"), 32),
			},
			Target: &ethpb.Checkpoint{
				Epoch: 1,
				Root:  bytesutil.PadTo([]byte("hello-world"), 32),
			},
		},
		AggregationBits: bitfield.Bitlist{0b11, 0b1},
	}

	block := &ethpb.BeaconBlockAltair{
		Slot: 2,
		Body: &ethpb.BeaconBlockBodyAltair{
			Attestations: []*ethpb.Attestation{att},
		},
	}

	wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
	require.NoError(t, err)
	s.processAttestations(ctx, state, wrappedBlock)
	wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
	wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
	require.LogsContain(t, hook, wanted1)
	require.LogsContain(t, hook, wanted2)
}
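An editorial aside on the two bit encodings these fixtures use (this sketch is illustrative and is not part of the diff): it assumes the Altair participation-flag layout from the consensus spec (bit 0 = timely source, bit 1 = timely target, bit 2 = timely head), and the prysmaticlabs/go-bitfield Bitlist convention in which the highest set bit of the final byte is a length marker rather than a participant.

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Participation bytes hold one flag byte per validator: 0xff sets every
	// flag, while 0x01 sets only the timely-source flag (bit 0 in Altair).
	flags := byte(0x01)
	fmt.Println("timely source:", flags&(1<<0) != 0) // true
	fmt.Println("timely target:", flags&(1<<1) != 0) // false

	// Bitlist{0b11, 0b1}: the final byte's highest set bit marks the length,
	// so this is an 8-bit list with bits 0 and 1 set, i.e. the first two
	// committee members attested.
	bits := bitfield.Bitlist{0b11, 0b1}
	fmt.Println(bits.Len(), bits.Count(), bits.BitAt(0)) // 8 2 true
}
```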
beacon-chain/monitor/process_block.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package monitor

import (
	"context"
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/time/slots"
	"github.com/sirupsen/logrus"
)

// AggregateReportingPeriod is the number of epochs between aggregate reports.
const AggregateReportingPeriod = 5

// processBlock handles the cases when
// 1) a block was proposed by one of our tracked validators,
// 2) an attestation by one of our tracked validators was included,
// 3) an exit by one of our tracked validators was included,
// 4) a slashing of one of our tracked validators was included,
// 5) a sync committee contribution by one of our tracked validators was included.
func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) {
	if b == nil || b.Block() == nil {
		return
	}
	blk := b.Block()

	s.processSlashings(blk)
	s.processExitsFromBlock(blk)

	root, err := blk.HashTreeRoot()
	if err != nil {
		log.WithError(err).Error("Could not compute block's hash tree root")
		return
	}
	state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
	if state == nil {
		log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
			"Skipping block collection due to state not found in cache")
		return
	}

	currEpoch := slots.ToEpoch(blk.Slot())
	s.RLock()
	lastSyncedEpoch := s.lastSyncedEpoch
	s.RUnlock()

	// Sync committee assignments change only at sync-committee-period
	// boundaries, so refresh them when the period of the current epoch
	// differs from that of the last synced epoch.
	if currEpoch != lastSyncedEpoch &&
		slots.SyncCommitteePeriod(currEpoch) != slots.SyncCommitteePeriod(lastSyncedEpoch) {
		s.updateSyncCommitteeTrackedVals(state)
	}

	s.processSyncAggregate(state, blk)
	s.processProposedBlock(state, root, blk)
	s.processAttestations(ctx, state, blk)

	if blk.Slot()%(AggregateReportingPeriod*params.BeaconConfig().SlotsPerEpoch) == 0 {
		s.logAggregatedPerformance()
	}
}

// processProposedBlock logs the event that one of our tracked validators proposed a block that was included.
func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, blk block.BeaconBlock) {
	s.Lock()
	defer s.Unlock()
	if s.trackedIndex(blk.ProposerIndex()) {
		// Update metrics.
		proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", blk.ProposerIndex())).Inc()

		// Update the performance map.
		balance, err := state.BalanceAtIndex(blk.ProposerIndex())
		if err != nil {
			log.WithError(err).Error("Could not get balance")
			return
		}

		latestPerf := s.latestPerformance[blk.ProposerIndex()]
		balanceChg := int64(balance - latestPerf.balance)
		latestPerf.balanceChange = balanceChg
		latestPerf.balance = balance
		s.latestPerformance[blk.ProposerIndex()] = latestPerf

		aggPerf := s.aggregatedPerformance[blk.ProposerIndex()]
		aggPerf.totalProposedCount++
		s.aggregatedPerformance[blk.ProposerIndex()] = aggPerf

		log.WithFields(logrus.Fields{
			"ProposerIndex": blk.ProposerIndex(),
			"Slot":          blk.Slot(),
			"Version":       blk.Version(),
			"ParentRoot":    fmt.Sprintf("%#x", bytesutil.Trunc(blk.ParentRoot())),
			"BlockRoot":     fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
			"NewBalance":    balance,
			"BalanceChange": balanceChg,
		}).Info("Proposed block was included")
	}
}

// processSlashings logs the event that one of our tracked validators was slashed.
func (s *Service) processSlashings(blk block.BeaconBlock) {
	s.RLock()
	defer s.RUnlock()
	for _, slashing := range blk.Body().ProposerSlashings() {
		idx := slashing.Header_1.Header.ProposerIndex
		if s.trackedIndex(idx) {
			log.WithFields(logrus.Fields{
				"ProposerIndex": idx,
				"Slot":          blk.Slot(),
				"SlashingSlot":  slashing.Header_1.Header.Slot,
				"Root1":         fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
				"Root2":         fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
			}).Info("Proposer slashing was included")
		}
	}

	for _, slashing := range blk.Body().AttesterSlashings() {
		for _, idx := range blocks.SlashableAttesterIndices(slashing) {
			if s.trackedIndex(types.ValidatorIndex(idx)) {
				log.WithFields(logrus.Fields{
					"AttesterIndex": idx,
					"Slot":          blk.Slot(),
					"Slot1":         slashing.Attestation_1.Data.Slot,
					"Root1":         fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
					"SourceEpoch1":  slashing.Attestation_1.Data.Source.Epoch,
					"TargetEpoch1":  slashing.Attestation_1.Data.Target.Epoch,
					"Slot2":         slashing.Attestation_2.Data.Slot,
					"Root2":         fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
					"SourceEpoch2":  slashing.Attestation_2.Data.Source.Epoch,
					"TargetEpoch2":  slashing.Attestation_2.Data.Target.Epoch,
				}).Info("Attester slashing was included")
			}
		}
	}
}

// logAggregatedPerformance logs the performance statistics collected since the run started.
func (s *Service) logAggregatedPerformance() {
	s.RLock()
	defer s.RUnlock()

	for idx, p := range s.aggregatedPerformance {
		// Skip validators for which we have not yet collected any data.
		if p.totalAttestedCount == 0 || p.totalRequestedCount == 0 || p.startBalance == 0 {
			continue
		}
		l, ok := s.latestPerformance[idx]
		if !ok {
			continue
		}
		percentAtt := float64(p.totalAttestedCount) / float64(p.totalRequestedCount)
		// Compute in float64 to avoid uint64 underflow when the balance decreased.
		percentBal := (float64(l.balance) - float64(p.startBalance)) / float64(p.startBalance)
		percentDistance := float64(p.totalDistance) / float64(p.totalAttestedCount)
		percentCorrectSource := float64(p.totalCorrectSource) / float64(p.totalAttestedCount)
		percentCorrectHead := float64(p.totalCorrectHead) / float64(p.totalAttestedCount)
		percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)

		log.WithFields(logrus.Fields{
			"ValidatorIndex":           idx,
			"StartEpoch":               p.startEpoch,
			"StartBalance":             p.startBalance,
			"TotalRequested":           p.totalRequestedCount,
			"AttestationInclusion":     fmt.Sprintf("%.2f%%", percentAtt*100),
			"BalanceChangePct":         fmt.Sprintf("%.2f%%", percentBal*100),
			"CorrectlyVotedSourcePct":  fmt.Sprintf("%.2f%%", percentCorrectSource*100),
			"CorrectlyVotedTargetPct":  fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
			"CorrectlyVotedHeadPct":    fmt.Sprintf("%.2f%%", percentCorrectHead*100),
			"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
			"TotalProposedBlocks":      p.totalProposedCount,
			"TotalAggregations":        p.totalAggregations,
			"TotalSyncContributions":   p.totalSyncCommitteeContributions,
		}).Info("Aggregated performance since launch")
	}
}
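To make the reporting cadence concrete (an editorial note, not part of the diff): with AggregateReportingPeriod = 5 and a mainnet configuration of 32 slots per epoch (assumed here; other networks differ), the gate `blk.Slot() % (5 * 32) == 0` fires every 160 slots, i.e. roughly every 32 minutes at 12 seconds per slot. A minimal sketch of the condition with those constants inlined:

```go
package main

import "fmt"

func main() {
	const (
		aggregateReportingPeriod = 5  // epochs between reports, from process_block.go
		slotsPerEpoch            = 32 // mainnet value, assumed for illustration
	)
	for slot := 1; slot <= 400; slot++ {
		if slot%(aggregateReportingPeriod*slotsPerEpoch) == 0 {
			fmt.Println("aggregate report at slot", slot) // slots 160, 320
		}
	}
}
```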
beacon-chain/monitor/process_block_test.go (new file, 246 lines)
@@ -0,0 +1,246 @@
package monitor

import (
	"context"
	"fmt"
	"testing"
	"time"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestProcessSlashings(t *testing.T) {
	tests := []struct {
		name      string
		block     *ethpb.BeaconBlock
		wantedErr string
	}{
		{
			name: "Proposer slashing a tracked index",
			block: &ethpb.BeaconBlock{
				Body: &ethpb.BeaconBlockBody{
					ProposerSlashings: []*ethpb.ProposerSlashing{
						{
							Header_1: &ethpb.SignedBeaconBlockHeader{
								Header: &ethpb.BeaconBlockHeader{
									ProposerIndex: 2,
									Slot:          params.BeaconConfig().SlotsPerEpoch + 1,
								},
							},
							Header_2: &ethpb.SignedBeaconBlockHeader{
								Header: &ethpb.BeaconBlockHeader{
									ProposerIndex: 2,
									Slot:          0,
								},
							},
						},
					},
				},
			},
			wantedErr: "\"Proposer slashing was included\" ProposerIndex=2",
		},
		{
			name: "Proposer slashing an untracked index",
			block: &ethpb.BeaconBlock{
				Body: &ethpb.BeaconBlockBody{
					ProposerSlashings: []*ethpb.ProposerSlashing{
						{
							Header_1: &ethpb.SignedBeaconBlockHeader{
								Header: &ethpb.BeaconBlockHeader{
									ProposerIndex: 3,
									Slot:          params.BeaconConfig().SlotsPerEpoch + 4,
								},
							},
							Header_2: &ethpb.SignedBeaconBlockHeader{
								Header: &ethpb.BeaconBlockHeader{
									ProposerIndex: 3,
									Slot:          0,
								},
							},
						},
					},
				},
			},
			wantedErr: "",
		},
		{
			name: "Attester slashing a tracked index",
			block: &ethpb.BeaconBlock{
				Body: &ethpb.BeaconBlockBody{
					AttesterSlashings: []*ethpb.AttesterSlashing{
						{
							Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
								Data: &ethpb.AttestationData{
									Source: &ethpb.Checkpoint{Epoch: 1},
								},
								AttestingIndices: []uint64{1, 3, 4},
							}),
							Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
								AttestingIndices: []uint64{1, 5, 6},
							}),
						},
					},
				},
			},
			wantedErr: "\"Attester slashing was included\" AttesterIndex=1",
		},
		{
			name: "Attester slashing untracked index",
			block: &ethpb.BeaconBlock{
				Body: &ethpb.BeaconBlockBody{
					AttesterSlashings: []*ethpb.AttesterSlashing{
						{
							Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
								Data: &ethpb.AttestationData{
									Source: &ethpb.Checkpoint{Epoch: 1},
								},
								AttestingIndices: []uint64{1, 3, 4},
							}),
							Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
								AttestingIndices: []uint64{3, 5, 6},
							}),
						},
					},
				},
			},
			wantedErr: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			hook := logTest.NewGlobal()
			s := &Service{
				TrackedValidators: map[types.ValidatorIndex]bool{
					1: true,
					2: true,
				},
			}
			s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block))
			if tt.wantedErr != "" {
				require.LogsContain(t, hook, tt.wantedErr)
			} else {
				require.LogsDoNotContain(t, hook, "slashing")
			}
		})
	}
}

func TestProcessProposedBlock(t *testing.T) {
	tests := []struct {
		name      string
		block     *ethpb.BeaconBlock
		wantedErr string
	}{
		{
			name: "Block proposed by tracked validator",
			block: &ethpb.BeaconBlock{
				Slot:          6,
				ProposerIndex: 12,
				ParentRoot:    bytesutil.PadTo([]byte("hello-world"), 32),
				StateRoot:     bytesutil.PadTo([]byte("state-world"), 32),
			},
			wantedErr: "\"Proposed block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
		},
		{
			name: "Block proposed by untracked validator",
			block: &ethpb.BeaconBlock{
				Slot:          6,
				ProposerIndex: 13,
				ParentRoot:    bytesutil.PadTo([]byte("hello-world"), 32),
				StateRoot:     bytesutil.PadTo([]byte("state-world"), 32),
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			hook := logTest.NewGlobal()
			s := setupService(t)
			beaconState, _ := util.DeterministicGenesisState(t, 256)
			root := [32]byte{}
			copy(root[:], "hello-world")
			s.processProposedBlock(beaconState, root, wrapper.WrappedPhase0BeaconBlock(tt.block))
			if tt.wantedErr != "" {
				require.LogsContain(t, hook, tt.wantedErr)
			} else {
				require.LogsDoNotContain(t, hook, "included")
			}
		})
	}
}

func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
	hook := logTest.NewGlobal()
	ctx := context.Background()

	genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
	c, err := altair.NextSyncCommittee(ctx, genesis)
	require.NoError(t, err)
	require.NoError(t, genesis.SetCurrentSyncCommittee(c))

	genConfig := util.DefaultBlockGenConfig()
	genConfig.NumProposerSlashings = 1
	b, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
	require.NoError(t, err)
	s := setupService(t)

	pubKeys := make([][]byte, 3)
	pubKeys[0] = genesis.Validators()[0].PublicKey
	pubKeys[1] = genesis.Validators()[1].PublicKey
	pubKeys[2] = genesis.Validators()[2].PublicKey

	currentSyncCommittee := util.ConvertToCommittee([][]byte{
		pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
	})
	require.NoError(t, genesis.SetCurrentSyncCommittee(currentSyncCommittee))

	idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex
	// Take the full lock since the tracked-validator maps may be mutated below.
	s.Lock()
	if !s.trackedIndex(idx) {
		s.TrackedValidators[idx] = true
		s.latestPerformance[idx] = ValidatorLatestPerformance{
			balance: 31900000000,
		}
		s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{}
	}
	s.Unlock()
	s.updateSyncCommitteeTrackedVals(genesis)

	root, err := b.GetBlock().HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
	wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
	wanted2 := fmt.Sprintf("\"Proposer slashing was included\" ProposerIndex=%d Root1=0x000100000000 Root2=0x000200000000 SlashingSlot=0 Slot=1 prefix=monitor", idx)
	wanted3 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=3 ExpectedContrib=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
	wanted4 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
	wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(b)
	require.NoError(t, err)
	s.processBlock(ctx, wrapped)
	require.LogsContain(t, hook, wanted1)
	require.LogsContain(t, hook, wanted2)
	require.LogsContain(t, hook, wanted3)
	require.LogsContain(t, hook, wanted4)
}

func TestLogAggregatedPerformance(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)

	s.logAggregatedPerformance()
	time.Sleep(3000 * time.Millisecond)
	wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
		" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
		"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
		"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
		"ValidatorIndex=1 prefix=monitor"
	require.LogsContain(t, hook, wanted)
}
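The percentages asserted in TestLogAggregatedPerformance follow directly from the validator-1 fixture in setupService (see service_test.go below: totalAttestedCount=12, totalRequestedCount=15, totalDistance=14, totalCorrectHead=8, totalCorrectSource=11, totalCorrectTarget=12, startBalance=31700000000, latest balance 32000000000). An editorial recomputation of the expected values:

```go
package main

import "fmt"

func main() {
	// Fixture values for validator 1 from setupService in service_test.go.
	var (
		attested, requested  = 12.0, 15.0
		distance             = 14.0
		head, source, target = 8.0, 11.0, 12.0
		startBal, balance    = 31700000000.0, 32000000000.0
	)
	fmt.Printf("AttestationInclusion=%.2f%%\n", attested/requested*100)      // 80.00%
	fmt.Printf("BalanceChangePct=%.2f%%\n", (balance-startBal)/startBal*100) // 0.95%
	fmt.Printf("AverageInclusionDistance=%.1f\n", distance/attested)         // 1.2
	fmt.Printf("CorrectlyVotedHeadPct=%.2f%%\n", head/attested*100)          // 66.67%
	fmt.Printf("CorrectlyVotedSourcePct=%.2f%%\n", source/attested*100)      // 91.67%
	fmt.Printf("CorrectlyVotedTargetPct=%.2f%%\n", target/attested*100)      // 100.00%
}
```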
beacon-chain/monitor/process_exit.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package monitor

import (
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/sirupsen/logrus"
)

// processExitsFromBlock logs the event that a voluntary exit by one of our
// tracked validators was included in a block.
func (s *Service) processExitsFromBlock(blk block.BeaconBlock) {
	s.RLock()
	defer s.RUnlock()
	for _, exit := range blk.Body().VoluntaryExits() {
		idx := exit.Exit.ValidatorIndex
		if s.trackedIndex(idx) {
			log.WithFields(logrus.Fields{
				"ValidatorIndex": idx,
				"Slot":           blk.Slot(),
			}).Info("Voluntary exit was included")
		}
	}
}

// processExit logs the event that a voluntary exit by one of our tracked
// validators was processed.
func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
	idx := exit.Exit.ValidatorIndex
	s.RLock()
	defer s.RUnlock()
	if s.trackedIndex(idx) {
		log.WithFields(logrus.Fields{
			"ValidatorIndex": idx,
		}).Info("Voluntary exit was processed")
	}
}
beacon-chain/monitor/process_exit_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package monitor

import (
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
	hook := logTest.NewGlobal()
	s := &Service{
		TrackedValidators: map[types.ValidatorIndex]bool{
			1: true,
			2: true,
		},
	}

	exits := []*ethpb.SignedVoluntaryExit{
		{
			Exit: &ethpb.VoluntaryExit{
				ValidatorIndex: 3,
				Epoch:          1,
			},
		},
		{
			Exit: &ethpb.VoluntaryExit{
				ValidatorIndex: 2,
				Epoch:          0,
			},
		},
	}

	block := &ethpb.BeaconBlock{
		Body: &ethpb.BeaconBlockBody{
			VoluntaryExits: exits,
		},
	}

	s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
	require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
}

func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
	hook := logTest.NewGlobal()
	s := &Service{
		TrackedValidators: map[types.ValidatorIndex]bool{
			1: true,
			2: true,
		},
	}

	exits := []*ethpb.SignedVoluntaryExit{
		{
			Exit: &ethpb.VoluntaryExit{
				ValidatorIndex: 3,
				Epoch:          1,
			},
		},
		{
			Exit: &ethpb.VoluntaryExit{
				ValidatorIndex: 4,
				Epoch:          0,
			},
		},
	}

	block := &ethpb.BeaconBlock{
		Body: &ethpb.BeaconBlockBody{
			VoluntaryExits: exits,
		},
	}

	s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
	require.LogsDoNotContain(t, hook, "\"Voluntary exit was included\"")
}

func TestProcessExitP2PTrackedIndices(t *testing.T) {
	hook := logTest.NewGlobal()
	s := &Service{
		TrackedValidators: map[types.ValidatorIndex]bool{
			1: true,
			2: true,
		},
	}

	exit := &ethpb.SignedVoluntaryExit{
		Exit: &ethpb.VoluntaryExit{
			ValidatorIndex: 1,
			Epoch:          1,
		},
		Signature: make([]byte, 96),
	}
	s.processExit(exit)
	require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
}

func TestProcessExitP2PUntrackedIndices(t *testing.T) {
	hook := logTest.NewGlobal()
	s := &Service{
		TrackedValidators: map[types.ValidatorIndex]bool{
			1: true,
			2: true,
		},
	}

	exit := &ethpb.SignedVoluntaryExit{
		Exit: &ethpb.VoluntaryExit{
			ValidatorIndex: 3,
			Epoch:          1,
		},
	}
	s.processExit(exit)
	require.LogsDoNotContain(t, hook, "\"Voluntary exit was processed\"")
}
beacon-chain/monitor/process_sync_committee.go (new file, 79 lines)
@@ -0,0 +1,79 @@
package monitor

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/sirupsen/logrus"
)

// processSyncCommitteeContribution logs the event that one of our tracked
// validators' aggregated sync contribution has been processed.
// TODO: We do not log when a sync contribution is included in an aggregate
// (contributions are logged only when they are included in blocks).
func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedContributionAndProof) {
	idx := contribution.Message.AggregatorIndex
	s.Lock()
	defer s.Unlock()
	if s.trackedIndex(idx) {
		aggPerf := s.aggregatedPerformance[idx]
		aggPerf.totalSyncCommitteeAggregations++
		s.aggregatedPerformance[idx] = aggPerf

		log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
	}
}

// processSyncAggregate logs the event that one of our tracked validators is a
// sync committee member and its contribution was included.
func (s *Service) processSyncAggregate(state state.BeaconState, blk block.BeaconBlock) {
	if blk == nil || blk.Body() == nil {
		return
	}
	bits, err := blk.Body().SyncAggregate()
	if err != nil {
		log.WithError(err).Error("Cannot get SyncAggregate")
		return
	}
	s.Lock()
	defer s.Unlock()
	for validatorIdx, committeeIndices := range s.trackedSyncCommitteeIndices {
		if len(committeeIndices) > 0 {
			contrib := 0
			for _, idx := range committeeIndices {
				if bits.SyncCommitteeBits.BitAt(uint64(idx)) {
					contrib++
				}
			}

			balance, err := state.BalanceAtIndex(validatorIdx)
			if err != nil {
				log.WithError(err).Error("Could not get balance")
				return
			}

			latestPerf := s.latestPerformance[validatorIdx]
			balanceChg := int64(balance - latestPerf.balance)
			latestPerf.balanceChange = balanceChg
			latestPerf.balance = balance
			s.latestPerformance[validatorIdx] = latestPerf

			aggPerf := s.aggregatedPerformance[validatorIdx]
			aggPerf.totalSyncCommitteeContributions += uint64(contrib)
			s.aggregatedPerformance[validatorIdx] = aggPerf

			syncCommitteeContributionCounter.WithLabelValues(
				fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))

			log.WithFields(logrus.Fields{
				"ValidatorIndex":  validatorIdx,
				"ExpectedContrib": len(committeeIndices),
				"Contributions":   contrib,
				"NewBalance":      balance,
				"BalanceChange":   balanceChg,
			}).Info("Sync committee contribution included")
		}
	}
}
beacon-chain/monitor/process_sync_committee_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package monitor

import (
	"testing"

	"github.com/prysmaticlabs/go-bitfield"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestProcessSyncCommitteeContribution(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)

	contrib := &ethpb.SignedContributionAndProof{
		Message: &ethpb.ContributionAndProof{
			AggregatorIndex: 1,
		},
	}

	s.processSyncCommitteeContribution(contrib)
	require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
	require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}

func TestProcessSyncAggregate(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)
	beaconState, _ := util.DeterministicGenesisStateAltair(t, 256)

	block := &ethpb.BeaconBlockAltair{
		Slot: 2,
		Body: &ethpb.BeaconBlockBodyAltair{
			SyncAggregate: &ethpb.SyncAggregate{
				SyncCommitteeBits: bitfield.Bitvector512{
					0x31, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
				},
			},
		},
	}

	wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
	require.NoError(t, err)

	s.processSyncAggregate(beaconState, wrappedBlock)
	require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
	require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 Contributions=2 ExpectedContrib=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
	require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}
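A quick editorial sanity check of the expected values above: the fixture's first sync-committee byte is 0x31 = 0b00110001, so committee bits 0, 4 and 5 are set. In setupService, validator 1 is tracked at committee indices {0, 1, 2, 3} and validator 12 at {4, 5}, which yields Contributions=1 of ExpectedContrib=4 and Contributions=2 of ExpectedContrib=2, exactly as asserted. Recomputed in plain Go:

```go
package main

import "fmt"

func main() {
	const firstByte = 0x31 // 0b00110001: committee bits 0, 4, 5 are set.
	count := func(indices []uint) int {
		n := 0
		for _, i := range indices {
			if firstByte&(1<<i) != 0 {
				n++
			}
		}
		return n
	}
	fmt.Println(count([]uint{0, 1, 2, 3})) // validator 1: 1 contribution of 4 expected
	fmt.Println(count([]uint{4, 5}))       // validator 12: 2 contributions of 2 expected
}
```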
beacon-chain/monitor/service.go (new file, 311 lines)
@@ -0,0 +1,311 @@
package monitor

import (
	"context"
	"errors"
	"sort"
	"sync"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/async/event"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/time/slots"
	"github.com/sirupsen/logrus"
)

var (
	// errNotSyncedData is returned when event feed data is not of type *statefeed.SyncedData.
	errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData")

	// errContextClosedWhileWaiting is returned when the context is closed while waiting for sync.
	errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest head")
)

// ValidatorLatestPerformance keeps track of the latest participation of the validator.
type ValidatorLatestPerformance struct {
	attestedSlot  types.Slot
	inclusionSlot types.Slot
	timelySource  bool
	timelyTarget  bool
	timelyHead    bool
	balance       uint64
	balanceChange int64
}

// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch.
type ValidatorAggregatedPerformance struct {
	startEpoch                      types.Epoch
	startBalance                    uint64
	totalAttestedCount              uint64
	totalRequestedCount             uint64
	totalDistance                   uint64
	totalCorrectSource              uint64
	totalCorrectTarget              uint64
	totalCorrectHead                uint64
	totalProposedCount              uint64
	totalAggregations               uint64
	totalSyncCommitteeContributions uint64
	totalSyncCommitteeAggregations  uint64
}

// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifiers that the
// monitor needs to subscribe to.
type ValidatorMonitorConfig struct {
	StateNotifier       statefeed.Notifier
	AttestationNotifier operation.Notifier
	HeadFetcher         blockchain.HeadFetcher
	StateGen            stategen.StateManager
}

// Service is the main structure that tracks validators and reports logs and
// metrics of their performance throughout their lifetime.
type Service struct {
	config    *ValidatorMonitorConfig
	ctx       context.Context
	cancel    context.CancelFunc
	isLogging bool

	// Guards access to TrackedValidators, latestPerformance, aggregatedPerformance,
	// trackedSyncCommitteeIndices and lastSyncedEpoch.
	sync.RWMutex

	TrackedValidators           map[types.ValidatorIndex]bool
	latestPerformance           map[types.ValidatorIndex]ValidatorLatestPerformance
	aggregatedPerformance       map[types.ValidatorIndex]ValidatorAggregatedPerformance
	trackedSyncCommitteeIndices map[types.ValidatorIndex][]types.CommitteeIndex
	lastSyncedEpoch             types.Epoch
}

// NewService sets up a new validator monitor instance when given a list of validator indices to track.
func NewService(ctx context.Context, config *ValidatorMonitorConfig, tracked []types.ValidatorIndex) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	r := &Service{
		config:                      config,
		ctx:                         ctx,
		cancel:                      cancel,
		TrackedValidators:           make(map[types.ValidatorIndex]bool, len(tracked)),
		latestPerformance:           make(map[types.ValidatorIndex]ValidatorLatestPerformance),
		aggregatedPerformance:       make(map[types.ValidatorIndex]ValidatorAggregatedPerformance),
		trackedSyncCommitteeIndices: make(map[types.ValidatorIndex][]types.CommitteeIndex),
	}
	for _, idx := range tracked {
		r.TrackedValidators[idx] = true
	}
	return r, nil
}

// Start sets up the TrackedValidators map and then waits until the beacon is synced.
func (s *Service) Start() {
	s.Lock()
	defer s.Unlock()

	tracked := make([]types.ValidatorIndex, 0, len(s.TrackedValidators))
	for idx := range s.TrackedValidators {
		tracked = append(tracked, idx)
	}
	sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })

	log.WithFields(logrus.Fields{
		"ValidatorIndices": tracked,
	}).Info("Starting service")

	s.isLogging = false
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

	go s.run(stateChannel, stateSub)
}

// run waits until the beacon is synced and starts the monitoring system.
func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
	if stateChannel == nil {
		log.Error("State channel is nil")
		return
	}

	if err := s.waitForSync(stateChannel, stateSub); err != nil {
		log.WithError(err).Error("Could not wait for sync")
		return
	}
	state, err := s.config.HeadFetcher.HeadState(s.ctx)
	if err != nil {
		log.WithError(err).Error("Could not get head state")
		return
	}
	if state == nil {
		log.Error("Head state is nil")
		return
	}

	epoch := slots.ToEpoch(state.Slot())
	log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")

	s.Lock()
	s.initializePerformanceStructures(state, epoch)
	s.Unlock()

	s.updateSyncCommitteeTrackedVals(state)

	s.Lock()
	s.isLogging = true
	s.Unlock()

	s.monitorRoutine(stateChannel, stateSub)
}

// initializePerformanceStructures initializes the ValidatorLatestPerformance
// and ValidatorAggregatedPerformance entries for each tracked validator.
func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch types.Epoch) {
	for idx := range s.TrackedValidators {
		balance, err := state.BalanceAtIndex(idx)
		if err != nil {
			log.WithError(err).WithField("ValidatorIndex", idx).Error(
				"Could not fetch starting balance, skipping aggregated logs.")
			balance = 0
		}
		s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{
			startEpoch:   epoch,
			startBalance: balance,
		}
		s.latestPerformance[idx] = ValidatorLatestPerformance{
			balance: balance,
		}
	}
}

// Status retrieves the status of the service.
func (s *Service) Status() error {
	if s.isLogging {
		return nil
	}
	return errors.New("not running")
}

// Stop stops the service.
func (s *Service) Stop() error {
	defer s.cancel()
	s.isLogging = false
	return nil
}

// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error {
	for {
		select {
		case event := <-stateChannel:
			if event.Type == statefeed.Synced {
				_, ok := event.Data.(*statefeed.SyncedData)
				if !ok {
					return errNotSyncedData
				}
				return nil
			}
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			return errContextClosedWhileWaiting
		case err := <-stateSub.Err():
			log.WithError(err).Error("Could not subscribe to state notifier")
			return err
		}
	}
}

// monitorRoutine is the main dispatcher. It registers event channels for the
// state feed and the operation feed, and then calls the appropriate function
// when we get messages after syncing a block or processing attestations/sync
// committee contributions.
func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.Subscription) {
	defer stateSub.Unsubscribe()

	opChannel := make(chan *feed.Event, 1)
	opSub := s.config.AttestationNotifier.OperationFeed().Subscribe(opChannel)
	defer opSub.Unsubscribe()

	for {
		select {
		case event := <-stateChannel:
			if event.Type == statefeed.BlockProcessed {
				data, ok := event.Data.(*statefeed.BlockProcessedData)
				if !ok {
					log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
				} else if data.Verified {
					// We only process blocks that have been verified.
					s.processBlock(s.ctx, data.SignedBlock)
				}
			}
		case event := <-opChannel:
			switch event.Type {
			case operation.UnaggregatedAttReceived:
				data, ok := event.Data.(*operation.UnAggregatedAttReceivedData)
				if !ok {
					log.Error("Event feed data is not of type *operation.UnAggregatedAttReceivedData")
				} else {
					s.processUnaggregatedAttestation(s.ctx, data.Attestation)
				}
			case operation.AggregatedAttReceived:
				data, ok := event.Data.(*operation.AggregatedAttReceivedData)
				if !ok {
					log.Error("Event feed data is not of type *operation.AggregatedAttReceivedData")
				} else {
					s.processAggregatedAttestation(s.ctx, data.Attestation)
				}
			case operation.ExitReceived:
				data, ok := event.Data.(*operation.ExitReceivedData)
				if !ok {
					log.Error("Event feed data is not of type *operation.ExitReceivedData")
				} else {
					s.processExit(data.Exit)
				}
			case operation.SyncCommitteeContributionReceived:
				data, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData)
				if !ok {
					log.Error("Event feed data is not of type *operation.SyncCommitteeContributionReceivedData")
				} else {
					s.processSyncCommitteeContribution(data.Contribution)
				}
			}
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			return
		case err := <-stateSub.Err():
			log.WithError(err).Error("Could not subscribe to state notifier")
			return
		}
	}
}

// trackedIndex returns true if the given validator index corresponds to one of
// the validators we follow.
// It assumes the caller holds the service lock.
func (s *Service) trackedIndex(idx types.ValidatorIndex) bool {
	_, ok := s.TrackedValidators[idx]
	return ok
}

// updateSyncCommitteeTrackedVals updates the sync committee assignments of our
// tracked validators. It is called when we sync a block after the sync
// committee period changes.
func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
	s.Lock()
	defer s.Unlock()
	for idx := range s.TrackedValidators {
		syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
		if err != nil {
			log.WithError(err).WithField("ValidatorIndex", idx).Error(
				"Sync committee assignments will not be reported")
			delete(s.trackedSyncCommitteeIndices, idx)
		} else if len(syncIdx) == 0 {
			delete(s.trackedSyncCommitteeIndices, idx)
		} else {
			s.trackedSyncCommitteeIndices[idx] = syncIdx
		}
	}
	s.lastSyncedEpoch = slots.ToEpoch(state.Slot())
}
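As a usage illustration (an editorial sketch, not part of the diff), a caller would wire the monitor up roughly as follows. The `cfg` value and tracked indices here are placeholders; in the beacon node itself this is done by the registerValidatorMonitorService call shown in the node.go hunk further below.

```go
package main

import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/monitor"
)

func main() {
	// Hypothetical wiring: cfg must be populated with a real state notifier,
	// operation notifier, head fetcher and stategen.StateManager.
	cfg := &monitor.ValidatorMonitorConfig{ /* ... */ }
	tracked := []types.ValidatorIndex{1, 2, 12, 15}

	svc, err := monitor.NewService(context.Background(), cfg, tracked)
	if err != nil {
		panic(err)
	}
	svc.Start()      // logs "Starting service" and waits for the Synced event
	defer svc.Stop() // cancels the service context
}
```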
beacon-chain/monitor/service_test.go (new file, 312 lines)
@@ -0,0 +1,312 @@
package monitor

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	types "github.com/prysmaticlabs/eth2-types"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
	"github.com/prysmaticlabs/prysm/time/slots"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func setupService(t *testing.T) *Service {
	beaconDB := testDB.SetupDB(t)
	state, _ := util.DeterministicGenesisStateAltair(t, 256)

	pubKeys := make([][]byte, 3)
	pubKeys[0] = state.Validators()[0].PublicKey
	pubKeys[1] = state.Validators()[1].PublicKey
	pubKeys[2] = state.Validators()[2].PublicKey

	currentSyncCommittee := util.ConvertToCommittee([][]byte{
		pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
	})
	require.NoError(t, state.SetCurrentSyncCommittee(currentSyncCommittee))

	chainService := &mock.ChainService{
		Genesis:        time.Now(),
		DB:             beaconDB,
		State:          state,
		Root:           []byte("hello-world"),
		ValidatorsRoot: [32]byte{},
	}

	trackedVals := map[types.ValidatorIndex]bool{
		1:  true,
		2:  true,
		12: true,
		15: true,
	}
	latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
		1: {
			balance: 32000000000,
		},
		2: {
			balance: 32000000000,
		},
		12: {
			balance: 31900000000,
		},
		15: {
			balance: 31900000000,
		},
	}
	aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
		1: {
			startEpoch:                      0,
			startBalance:                    31700000000,
			totalAttestedCount:              12,
			totalRequestedCount:             15,
			totalDistance:                   14,
			totalCorrectHead:                8,
			totalCorrectSource:              11,
			totalCorrectTarget:              12,
			totalProposedCount:              1,
			totalSyncCommitteeContributions: 0,
			totalSyncCommitteeAggregations:  0,
		},
		2:  {},
		12: {},
		15: {},
	}
	trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
		1:  {0, 1, 2, 3},
		12: {4, 5},
	}
	return &Service{
		config: &ValidatorMonitorConfig{
			StateGen:            stategen.New(beaconDB),
			StateNotifier:       chainService.StateNotifier(),
			HeadFetcher:         chainService,
			AttestationNotifier: chainService.OperationNotifier(),
		},

		ctx:                         context.Background(),
		TrackedValidators:           trackedVals,
		latestPerformance:           latestPerformance,
		aggregatedPerformance:       aggregatedPerformance,
		trackedSyncCommitteeIndices: trackedSyncCommitteeIndices,
		lastSyncedEpoch:             0,
	}
}

func TestTrackedIndex(t *testing.T) {
	s := &Service{
		TrackedValidators: map[types.ValidatorIndex]bool{
			1: true,
			2: true,
		},
	}
	require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true)
	require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false)
}

func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)
	state, _ := util.DeterministicGenesisStateAltair(t, 1024)

	s.updateSyncCommitteeTrackedVals(state)
	require.LogsDoNotContain(t, hook, "Sync committee assignments will not be reported")
	newTrackedSyncIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
		1: {1, 3, 4},
		2: {2},
	}
	require.DeepEqual(t, s.trackedSyncCommitteeIndices, newTrackedSyncIndices)
}

func TestNewService(t *testing.T) {
	config := &ValidatorMonitorConfig{}
	tracked := []types.ValidatorIndex{}
	ctx := context.Background()
	_, err := NewService(ctx, config, tracked)
	require.NoError(t, err)
}

func TestStart(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
	defer stateSub.Unsubscribe()

	wg := &sync.WaitGroup{}
	wg.Add(1)
	s.Start()

	go func() {
		select {
		case stateEvent := <-stateChannel:
			if stateEvent.Type == statefeed.Synced {
				_, ok := stateEvent.Data.(*statefeed.SyncedData)
				require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
			}
		case <-s.ctx.Done():
		}
		wg.Done()
	}()

	for sent := 0; sent == 0; {
		sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{
			Type: statefeed.Synced,
			Data: &statefeed.SyncedData{
				StartTime: time.Now(),
			},
		})
	}

	// Wait for logrus to process the entries.
	time.Sleep(1000 * time.Millisecond)
	require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
	require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
	require.Equal(t, s.isLogging, true, "monitor is not running")
}

func TestInitializePerformanceStructures(t *testing.T) {
	hook := logTest.NewGlobal()
	ctx := context.Background()
	s := setupService(t)
	state, err := s.config.HeadFetcher.HeadState(ctx)
	require.NoError(t, err)
	epoch := slots.ToEpoch(state.Slot())
	s.initializePerformanceStructures(state, epoch)
	require.LogsDoNotContain(t, hook, "Could not fetch starting balance")
	latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
		1: {
			balance: 32000000000,
		},
		2: {
			balance: 32000000000,
		},
		12: {
			balance: 32000000000,
		},
		15: {
			balance: 32000000000,
		},
	}
	aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
		1: {
			startBalance: 32000000000,
		},
		2: {
			startBalance: 32000000000,
		},
		12: {
			startBalance: 32000000000,
		},
		15: {
			startBalance: 32000000000,
		},
	}

	require.DeepEqual(t, s.latestPerformance, latestPerformance)
	require.DeepEqual(t, s.aggregatedPerformance, aggregatedPerformance)
}

func TestMonitorRoutine(t *testing.T) {
	ctx := context.Background()
	hook := logTest.NewGlobal()
	s := setupService(t)
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

	wg := &sync.WaitGroup{}
	wg.Add(1)

	go func() {
		s.monitorRoutine(stateChannel, stateSub)
		wg.Done()
	}()

	genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
	c, err := altair.NextSyncCommittee(ctx, genesis)
	require.NoError(t, err)
	require.NoError(t, genesis.SetCurrentSyncCommittee(c))

	genConfig := util.DefaultBlockGenConfig()
	block, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
	require.NoError(t, err)
	root, err := block.GetBlock().HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))

	wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(block)
	require.NoError(t, err)

	stateChannel <- &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			Slot:        1,
			Verified:    true,
			SignedBlock: wrapped,
		},
	}

	// Wait for logrus to process the entries.
	time.Sleep(1000 * time.Millisecond)
	wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
	require.LogsContain(t, hook, wanted1)
}

func TestWaitForSync(t *testing.T) {
	s := setupService(t)
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
	defer stateSub.Unsubscribe()

	wg := &sync.WaitGroup{}
	wg.Add(1)

	go func() {
		err := s.waitForSync(stateChannel, stateSub)
		require.NoError(t, err)
		wg.Done()
	}()

	stateChannel <- &feed.Event{
		Type: statefeed.Synced,
		Data: &statefeed.SyncedData{
			StartTime: time.Now(),
		},
	}
	// Ensure the goroutine's assertion runs before the test returns.
	wg.Wait()
}

func TestRun(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

	wg := &sync.WaitGroup{}
	wg.Add(1)

	go func() {
		s.run(stateChannel, stateSub)
		wg.Done()
	}()

	stateChannel <- &feed.Event{
		Type: statefeed.Synced,
		Data: &statefeed.SyncedData{
			StartTime: time.Now(),
		},
	}
	// Wait for logrus to process the entries.
	time.Sleep(1000 * time.Millisecond)
	require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
}
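A note on the `for sent := 0; sent == 0` retry loop in TestStart (editorial, assuming Prysm's async/event package mirrors go-ethereum's event.Feed API): Send returns the number of subscribers the event was delivered to, and Start registers its subscription asynchronously in a goroutine, so the test spins until at least one subscriber is attached. A minimal round-trip under that assumption:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/async/event"
)

func main() {
	var f event.Feed // the zero-value Feed is assumed ready to use
	ch := make(chan int, 1)
	sub := f.Subscribe(ch)
	defer sub.Unsubscribe()

	// Send reports how many subscribers received the value, which is why
	// the test loops until it is non-zero.
	fmt.Println(f.Send(7)) // 1
	fmt.Println(<-ch)      // 7
}
```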
@@ -26,6 +26,7 @@ go_library(
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/gateway:go_default_library",
|
||||
"//beacon-chain/monitor:go_default_library",
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
@@ -36,6 +37,7 @@ go_library(
|
||||
"//beacon-chain/rpc:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/slasher:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync:go_default_library",
|
||||
@@ -44,6 +46,7 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/backup:go_default_library",
|
||||
"//monitoring/prometheus:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
@@ -75,6 +78,7 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -95,3 +96,26 @@ func configureInteropConfig(cliCtx *cli.Context) {
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
}
|
||||
}
|
||||
|
||||
func configureExecutionSetting(cliCtx *cli.Context) {
|
||||
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.TerminalTotalDifficulty = cliCtx.Uint64(flags.TerminalTotalDifficultyOverride.Name)
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.FeeRecipient.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -70,6 +71,29 @@ func TestConfigureProofOfWork(t *testing.T) {
|
||||
assert.Equal(t, "deposit-contract", params.BeaconConfig().DepositContractAddress)
|
||||
}
|
||||
|
||||
func TestConfigureExecutionSetting(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Uint64(flags.TerminalTotalDifficultyOverride.Name, 0, "")
|
||||
set.String(flags.TerminalBlockHashOverride.Name, "", "")
|
||||
set.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name, 0, "")
|
||||
set.String(flags.FeeRecipient.Name, "", "")
|
||||
require.NoError(t, set.Set(flags.TerminalTotalDifficultyOverride.Name, strconv.Itoa(100)))
|
||||
require.NoError(t, set.Set(flags.TerminalBlockHashOverride.Name, "0xA"))
|
||||
require.NoError(t, set.Set(flags.TerminalBlockHashActivationEpochOverride.Name, strconv.Itoa(200)))
|
||||
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB"))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureExecutionSetting(cliCtx)
|
||||
|
||||
assert.Equal(t, uint64(100), params.BeaconConfig().TerminalTotalDifficulty)
|
||||
assert.Equal(t, common.HexToHash("0xA"), params.BeaconConfig().TerminalBlockHash)
|
||||
assert.Equal(t, types.Epoch(200), params.BeaconConfig().TerminalBlockHashActivationEpoch)
|
||||
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient)
|
||||
}
|
||||
|
||||
func TestConfigureNetwork(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
@@ -28,6 +29,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/monitor"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
@@ -38,6 +40,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/slasher"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
regularsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
|
||||
@@ -46,6 +49,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/backup"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/prometheus"
|
||||
"github.com/prysmaticlabs/prysm/runtime"
|
||||
@@ -61,6 +65,14 @@ const testSkipPowFlag = "test-skip-pow"
|
||||
// 128MB max message size when enabling debug endpoints.
|
||||
const debugGrpcMaxMsgSize = 1 << 27
|
||||
|
||||
// Used as a struct to keep cli flag options for configuring services
// for the beacon node. We keep this as a separate struct to not pollute the actual BeaconNode
// struct, as it is merely used to pass down configuration options into the appropriate services.
type serviceFlagOpts struct {
    blockchainFlagOpts []blockchain.Option
    powchainFlagOpts   []powchain.Option
}

// BeaconNode defines a struct that handles the services running a random beacon chain
// full PoS node. It handles the lifecycle of the entire system and registers
// services to a service registry.
@@ -86,7 +98,8 @@ type BeaconNode struct {
    collector               *bcnodeCollector
    slasherBlockHeadersFeed *event.Feed
    slasherAttestationsFeed *event.Feed
    blockchainFlagOpts      []blockchain.Option
    finalizedStateAtStartUp state.BeaconState
    serviceFlagOpts         *serviceFlagOpts
}
// New creates a new node instance, sets up configuration options, and registers
@@ -127,6 +140,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
        syncCommitteePool:       synccommittee.NewPool(),
        slasherBlockHeadersFeed: new(event.Feed),
        slasherAttestationsFeed: new(event.Feed),
        serviceFlagOpts:         &serviceFlagOpts{},
    }

    for _, opt := range opts {
@@ -135,7 +149,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
        }
    }

    depositAddress, err := registration.DepositContractAddress()
    depositAddress, err := powchain.DepositContractAddress()
    if err != nil {
        return nil, err
    }
@@ -147,7 +161,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
        return nil, err
    }

    beacon.startStateGen()
    if err := beacon.startStateGen(); err != nil {
        return nil, err
    }

    if err := beacon.registerP2P(cliCtx); err != nil {
        return nil, err
@@ -191,6 +207,10 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
        return nil, err
    }

    if err := beacon.registerValidatorMonitorService(); err != nil {
        return nil, err
    }

    if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
        if err := beacon.registerPrometheusService(cliCtx); err != nil {
            return nil, err
@@ -420,8 +440,34 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
    return nil
}

func (b *BeaconNode) startStateGen() {
func (b *BeaconNode) startStateGen() error {
    b.stateGen = stategen.New(b.db)

    cp, err := b.db.FinalizedCheckpoint(b.ctx)
    if err != nil {
        return err
    }

    r := bytesutil.ToBytes32(cp.Root)
    // Consider edge case where finalized root are zeros instead of genesis root hash.
    if r == params.BeaconConfig().ZeroHash {
        genesisBlock, err := b.db.GenesisBlock(b.ctx)
        if err != nil {
            return err
        }
        if genesisBlock != nil && !genesisBlock.IsNil() {
            r, err = genesisBlock.Block().HashTreeRoot()
            if err != nil {
                return err
            }
        }
    }

    b.finalizedStateAtStartUp, err = b.stateGen.StateByRoot(b.ctx, r)
    if err != nil {
        return err
    }
    return nil
}
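Note: startStateGen now resolves which state to load at startup, falling back from an all-zero finalized checkpoint root (a fresh database) to the genesis block root. A minimal standalone sketch of just that fallback rule, with the database lookups abstracted away as plain arguments:

    package main

    import "fmt"

    // zeroHash stands in for params.BeaconConfig().ZeroHash.
    var zeroHash [32]byte

    // resolveStartupRoot mirrors the fallback above: an all-zero finalized
    // root means "no finalized checkpoint yet", so use the genesis root.
    func resolveStartupRoot(finalizedRoot, genesisRoot [32]byte) [32]byte {
        if finalizedRoot == zeroHash {
            return genesisRoot
        }
        return finalizedRoot
    }

    func main() {
        genesis := [32]byte{1}
        fmt.Println(resolveStartupRoot(zeroHash, genesis) == genesis) // true
    }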
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
@@ -443,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
        MetaDataDir:   cliCtx.String(cmd.P2PMetadata.Name),
        TCPPort:       cliCtx.Uint(cmd.P2PTCPPort.Name),
        UDPPort:       cliCtx.Uint(cmd.P2PUDPPort.Name),
        MaxPeers:      cliCtx.Uint(cmd.P2PMaxPeers.Name),
        MaxPeers:      cliCtx.Uint64(cmd.P2PMaxPeers.Name),
        AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
        DenyListCIDR:  slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
        EnableUPnP:    cliCtx.Bool(cmd.EnableUPnPFlag.Name),
@@ -488,7 +534,7 @@ func (b *BeaconNode) registerBlockchainService() error {

    // skipcq: CRT-D0001
    opts := append(
        b.blockchainFlagOpts,
        b.serviceFlagOpts.blockchainFlagOpts,
        blockchain.WithDatabase(b.db),
        blockchain.WithDepositCache(b.depositCache),
        blockchain.WithChainStartFetcher(web3Service),
@@ -501,6 +547,7 @@ func (b *BeaconNode) registerBlockchainService() error {
        blockchain.WithAttestationService(attService),
        blockchain.WithStateGen(b.stateGen),
        blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
        blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
    )
    blockchainService, err := blockchain.NewService(b.ctx, opts...)
    if err != nil {
@@ -513,29 +560,27 @@ func (b *BeaconNode) registerPOWChainService() error {
    if b.cliCtx.Bool(testSkipPowFlag) {
        return b.services.RegisterService(&powchain.Service{})
    }

    depAddress, endpoints, err := registration.PowchainPreregistration(b.cliCtx)
    if err != nil {
        return err
    }

    bs, err := powchain.NewPowchainCollector(b.ctx)
    if err != nil {
        return err
    }

    cfg := &powchain.Web3ServiceConfig{
        HttpEndpoints:          endpoints,
        DepositContract:        common.HexToAddress(depAddress),
        BeaconDB:               b.db,
        DepositCache:           b.depositCache,
        StateNotifier:          b,
        StateGen:               b.stateGen,
        Eth1HeaderReqLimit:     b.cliCtx.Uint64(flags.Eth1HeaderReqLimit.Name),
        BeaconNodeStatsUpdater: bs,
    depositContractAddr, err := powchain.DepositContractAddress()
    if err != nil {
        return err
    }

    web3Service, err := powchain.NewService(b.ctx, cfg)
    // skipcq: CRT-D0001
    opts := append(
        b.serviceFlagOpts.powchainFlagOpts,
        powchain.WithDepositContractAddress(common.HexToAddress(depositContractAddr)),
        powchain.WithDatabase(b.db),
        powchain.WithDepositCache(b.depositCache),
        powchain.WithStateNotifier(b),
        powchain.WithStateGen(b.stateGen),
        powchain.WithBeaconNodeStatsUpdater(bs),
        powchain.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
    )
    web3Service, err := powchain.NewService(b.ctx, opts...)
    if err != nil {
        return errors.Wrap(err, "could not register proof-of-work chain web3Service")
    }
@@ -559,24 +604,24 @@ func (b *BeaconNode) registerSyncService() error {
        return err
    }

    rs := regularsync.NewService(b.ctx, &regularsync.Config{
        DB:                      b.db,
        P2P:                     b.fetchP2P(),
        Chain:                   chainService,
        InitialSync:             initSync,
        StateNotifier:           b,
        BlockNotifier:           b,
        AttestationNotifier:     b,
        OperationNotifier:       b,
        AttPool:                 b.attestationPool,
        ExitPool:                b.exitPool,
        SlashingPool:            b.slashingsPool,
        SyncCommsPool:           b.syncCommitteePool,
        StateGen:                b.stateGen,
        SlasherAttestationsFeed: b.slasherAttestationsFeed,
        SlasherBlockHeadersFeed: b.slasherBlockHeadersFeed,
    })

    rs := regularsync.NewService(
        b.ctx,
        regularsync.WithDatabase(b.db),
        regularsync.WithP2P(b.fetchP2P()),
        regularsync.WithChainService(chainService),
        regularsync.WithInitialSync(initSync),
        regularsync.WithStateNotifier(b),
        regularsync.WithBlockNotifier(b),
        regularsync.WithAttestationNotifier(b),
        regularsync.WithOperationNotifier(b),
        regularsync.WithAttestationPool(b.attestationPool),
        regularsync.WithExitPool(b.exitPool),
        regularsync.WithSlashingPool(b.slashingsPool),
        regularsync.WithSyncCommsPool(b.syncCommitteePool),
        regularsync.WithStateGen(b.stateGen),
        regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
        regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
    )
    return b.services.RegisterService(rs)
}
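Note: the regularsync constructor above moves from a single Config struct to variadic functional options, matching the blockchain and powchain services. The diff only shows call sites; a minimal sketch of the pattern those WithX constructors presumably follow, with Service fields reduced to stand-ins:

    package regularsync

    import "context"

    // Option mutates the Service during construction. The Service fields
    // here are assumptions; only the pattern matches the call sites above.
    type Option func(s *Service)

    type Service struct {
        ctx      context.Context
        stateGen interface{} // stand-in for the real state generator
    }

    // WithStateGen sets the state generator used by the service.
    func WithStateGen(sg interface{}) Option {
        return func(s *Service) { s.stateGen = sg }
    }

    // NewService applies each option to a freshly allocated Service.
    func NewService(ctx context.Context, opts ...Option) *Service {
        s := &Service{ctx: ctx}
        for _, opt := range opts {
            opt(s)
        }
        return s
    }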
@@ -815,7 +860,45 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
            GenesisPath: genesisStatePath,
        })

        // Register genesis state as start-up state when interop mode.
        // The start-up state gets reused across services.
        st, err := b.db.GenesisState(b.ctx)
        if err != nil {
            return err
        }
        b.finalizedStateAtStartUp = st

        return b.services.RegisterService(svc)
    }
    return nil
}

func (b *BeaconNode) registerValidatorMonitorService() error {
    if cmd.ValidatorMonitorIndicesFlag.Value == nil {
        return nil
    }
    cliSlice := cmd.ValidatorMonitorIndicesFlag.Value.Value()
    if cliSlice == nil {
        return nil
    }
    tracked := make([]types.ValidatorIndex, len(cliSlice))
    for i := range tracked {
        tracked[i] = types.ValidatorIndex(cliSlice[i])
    }

    var chainService *blockchain.Service
    if err := b.services.FetchService(&chainService); err != nil {
        return err
    }
    monitorConfig := &monitor.ValidatorMonitorConfig{
        StateNotifier:       b,
        AttestationNotifier: b,
        StateGen:            b.stateGen,
        HeadFetcher:         chainService,
    }
    svc, err := monitor.NewService(b.ctx, monitorConfig, tracked)
    if err != nil {
        return err
    }
    return b.services.RegisterService(svc)
}
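Note: registerValidatorMonitorService relies on b.services.FetchService to look up the already-registered blockchain service by pointer type. A toy, reflection-based registry illustrating that lookup style; the real Prysm registry differs in details:

    package main

    import (
        "fmt"
        "reflect"
    )

    // A minimal type-keyed registry in the spirit of FetchService above.
    type registry struct {
        services map[reflect.Type]interface{}
    }

    func (r *registry) register(svc interface{}) {
        r.services[reflect.TypeOf(svc)] = svc
    }

    // fetch copies the stored service into the pointer target if the types match.
    func (r *registry) fetch(target interface{}) error {
        v := reflect.ValueOf(target).Elem()
        svc, ok := r.services[v.Type()]
        if !ok {
            return fmt.Errorf("unknown service type %v", v.Type())
        }
        v.Set(reflect.ValueOf(svc))
        return nil
    }

    type chainService struct{ name string }

    func main() {
        r := &registry{services: map[reflect.Type]interface{}{}}
        r.register(&chainService{name: "blockchain"})
        var cs *chainService
        if err := r.fetch(&cs); err != nil {
            panic(err)
        }
        fmt.Println(cs.name) // blockchain
    }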
@@ -1,6 +1,9 @@
package node

import "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
import (
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)

// Option for beacon node configuration.
type Option func(bn *BeaconNode) error
@@ -8,7 +11,15 @@ type Option func(bn *BeaconNode) error
// WithBlockchainFlagOptions includes functional options for the blockchain service related to CLI flags.
func WithBlockchainFlagOptions(opts []blockchain.Option) Option {
    return func(bn *BeaconNode) error {
        bn.blockchainFlagOpts = opts
        bn.serviceFlagOpts.blockchainFlagOpts = opts
        return nil
    }
}

// WithPowchainFlagOptions includes functional options for the powchain service related to CLI flags.
func WithPowchainFlagOptions(opts []powchain.Option) Option {
    return func(bn *BeaconNode) error {
        bn.serviceFlagOpts.powchainFlagOpts = opts
        return nil
    }
}
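Note: taken together with the New(...) changes above, these options let callers thread flag-derived service options through the node constructor into serviceFlagOpts, which registerPOWChainService then appends to. A hedged usage sketch; the bare cli context and empty option slice are illustrative only, not Prysm's actual cmd wiring:

    package main

    import (
        "flag"
        "log"

        "github.com/prysmaticlabs/prysm/beacon-chain/node"
        "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
        "github.com/urfave/cli/v2"
    )

    func main() {
        // A bare cli context, built the same way the tests in this diff do.
        app := cli.App{}
        set := flag.NewFlagSet("test", 0)
        cliCtx := cli.NewContext(&app, set, nil)

        // Flag-derived powchain options pass through the node constructor.
        var powOpts []powchain.Option
        bn, err := node.New(cliCtx, node.WithPowchainFlagOptions(powOpts))
        if err != nil {
            log.Fatal(err)
        }
        _ = bn
    }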
@@ -5,15 +5,12 @@ go_library(
    srcs = [
        "log.go",
        "p2p.go",
        "powchain.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/node/registration",
    visibility = ["//beacon-chain/node:__subpackages__"],
    deps = [
        "//cmd:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@in_gopkg_yaml_v2//:go_default_library",
@@ -22,18 +19,13 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = [
        "p2p_test.go",
        "powchain_test.go",
    ],
    srcs = ["p2p_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//cmd:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],
)
@@ -1,45 +0,0 @@
package registration

import (
    "errors"

    "github.com/ethereum/go-ethereum/common"
    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/urfave/cli/v2"
)

// PowchainPreregistration prepares data for powchain.Service's registration.
func PowchainPreregistration(cliCtx *cli.Context) (depositContractAddress string, endpoints []string, err error) {
    depositContractAddress, err = DepositContractAddress()
    if err != nil {
        return "", nil, err
    }

    if cliCtx.String(flags.HTTPWeb3ProviderFlag.Name) == "" && len(cliCtx.StringSlice(flags.FallbackWeb3ProviderFlag.Name)) == 0 {
        log.Error(
            "No ETH1 node specified to run with the beacon node. Please consider running your own Ethereum proof-of-work node for better uptime, security, and decentralization of Ethereum. Visit https://docs.prylabs.network/docs/prysm-usage/setup-eth1 for more information.",
        )
        log.Error(
            "You will need to specify --http-web3provider and/or --fallback-web3provider to attach an eth1 node to the prysm node. Without an eth1 node block proposals for your validator will be affected and the beacon node will not be able to initialize the genesis state.",
        )
    }

    endpoints = []string{cliCtx.String(flags.HTTPWeb3ProviderFlag.Name)}
    endpoints = append(endpoints, cliCtx.StringSlice(flags.FallbackWeb3ProviderFlag.Name)...)
    return
}

// DepositContractAddress returns the address of the deposit contract.
func DepositContractAddress() (string, error) {
    address := params.BeaconConfig().DepositContractAddress
    if address == "" {
        return "", errors.New("valid deposit contract is required")
    }

    if !common.IsHexAddress(address) {
        return "", errors.New("invalid deposit contract address given: " + address)
    }

    return address, nil
}
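Note: this file is deleted outright; per the node.go hunks above, DepositContractAddress now lives in the powchain package. The validation it performs is plain go-ethereum address checking, which can be exercised standalone:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        // common.IsHexAddress accepts 20-byte hex strings, 0x-prefixed or not.
        fmt.Println(common.IsHexAddress("0x00000000219ab540356cBB839Cbe05303d7705Fa")) // true
        fmt.Println(common.IsHexAddress("abc?!"))                                       // false
    }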
@@ -1,71 +0,0 @@
package registration

import (
    "flag"
    "testing"

    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/testing/assert"
    "github.com/prysmaticlabs/prysm/testing/require"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "github.com/urfave/cli/v2"
)

func TestPowchainPreregistration(t *testing.T) {
    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.String(flags.HTTPWeb3ProviderFlag.Name, "primary", "")
    fallback := cli.StringSlice{}
    err := fallback.Set("fallback1")
    require.NoError(t, err)
    err = fallback.Set("fallback2")
    require.NoError(t, err)
    set.Var(&fallback, flags.FallbackWeb3ProviderFlag.Name, "")
    ctx := cli.NewContext(&app, set, nil)

    address, endpoints, err := PowchainPreregistration(ctx)
    require.NoError(t, err)
    assert.Equal(t, params.BeaconConfig().DepositContractAddress, address)
    assert.DeepEqual(t, []string{"primary", "fallback1", "fallback2"}, endpoints)
}

func TestPowchainPreregistration_EmptyWeb3Provider(t *testing.T) {
    hook := logTest.NewGlobal()
    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.String(flags.HTTPWeb3ProviderFlag.Name, "", "")
    fallback := cli.StringSlice{}
    set.Var(&fallback, flags.FallbackWeb3ProviderFlag.Name, "")
    ctx := cli.NewContext(&app, set, nil)

    _, _, err := PowchainPreregistration(ctx)
    require.NoError(t, err)
    assert.LogsContain(t, hook, "No ETH1 node specified to run with the beacon node")
}

func TestDepositContractAddress_Ok(t *testing.T) {
    address, err := DepositContractAddress()
    require.NoError(t, err)
    assert.Equal(t, params.BeaconConfig().DepositContractAddress, address)
}

func TestDepositContractAddress_EmptyAddress(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.DepositContractAddress = ""
    params.OverrideBeaconConfig(config)

    _, err := DepositContractAddress()
    assert.ErrorContains(t, "valid deposit contract is required", err)
}

func TestDepositContractAddress_NotHexAddress(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.DepositContractAddress = "abc?!"
    params.OverrideBeaconConfig(config)

    _, err := DepositContractAddress()
    assert.ErrorContains(t, "invalid deposit contract address given", err)
}
@@ -36,11 +36,11 @@ func (m *PoolMock) InsertProposerSlashing(_ context.Context, _ state.ReadOnlyBea
}

// MarkIncludedAttesterSlashing --
func (_ *PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) {
func (*PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) {
    panic("implement me")
}

// MarkIncludedProposerSlashing --
func (_ *PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) {
func (*PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) {
    panic("implement me")
}
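Note: dropping the underscore receiver name is a small lint cleanup. When a method body never touches its receiver, Go lets the receiver go entirely unnamed. For illustration:

    package main

    type noopPool struct{}

    // The receiver is unused, so it needs no name at all.
    func (*noopPool) Reset() {}

    func main() {
        (&noopPool{}).Reset()
    }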
@@ -74,7 +74,6 @@ go_library(
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_ferranbt_fastssz//:go_default_library",
        "@com_github_ipfs_go_ipfs_addr//:go_default_library",
        "@com_github_kevinms_leakybucket_go//:go_default_library",
        "@com_github_kr_pretty//:go_default_library",
        "@com_github_libp2p_go_libp2p//:go_default_library",
@@ -23,7 +23,7 @@ type Config struct {
    MetaDataDir   string
    TCPPort       uint
    UDPPort       uint
    MaxPeers      uint
    MaxPeers      uint64
    AllowListCIDR string
    DenyListCIDR  []string
    StateNotifier statefeed.Notifier

@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
            ScorerParams: &scorers.Config{},
        }),
        host: mockp2p.NewTestP2P(t).BHost,
        cfg:  &Config{MaxPeers: uint(limit)},
        cfg:  &Config{MaxPeers: uint64(limit)},
    }
    var err error
    s.addrFilter, err = configureFilter(&Config{})
@@ -9,7 +9,6 @@ import (
    "github.com/ethereum/go-ethereum/p2p/discover"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    iaddr "github.com/ipfs/go-ipfs-addr"
    "github.com/libp2p/go-libp2p-core/network"
    "github.com/libp2p/go-libp2p-core/peer"
    ma "github.com/multiformats/go-multiaddr"
@@ -454,11 +453,7 @@ func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
}

func multiAddrFromString(address string) (ma.Multiaddr, error) {
    addr, err := iaddr.ParseString(address)
    if err != nil {
        return nil, err
    }
    return addr.Multiaddr(), nil
    return ma.NewMultiaddr(address)
}

func udpVersionFromIP(ipAddr net.IP) string {
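Note: multiAddrFromString now parses directly with go-multiaddr instead of routing through the deprecated go-ipfs-addr shim. The replacement call can be checked in isolation:

    package main

    import (
        "fmt"

        ma "github.com/multiformats/go-multiaddr"
    )

    func main() {
        // ma.NewMultiaddr parses and validates a multiaddr string in one step.
        addr, err := ma.NewMultiaddr("/ip4/192.0.2.1/tcp/13000")
        if err != nil {
            panic(err)
        }
        fmt.Println(addr.String()) // /ip4/192.0.2.1/tcp/13000
    }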
@@ -103,13 +103,13 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
    case strings.Contains(topic, GossipBlockMessage):
        return defaultBlockTopicParams(), nil
    case strings.Contains(topic, GossipAggregateAndProofMessage):
        return defaultAggregateTopicParams(activeValidators)
        return defaultAggregateTopicParams(activeValidators), nil
    case strings.Contains(topic, GossipAttestationMessage):
        return defaultAggregateSubnetTopicParams(activeValidators)
        return defaultAggregateSubnetTopicParams(activeValidators), nil
    case strings.Contains(topic, GossipSyncCommitteeMessage):
        return defaultSyncSubnetTopicParams(activeValidators)
        return defaultSyncSubnetTopicParams(activeValidators), nil
    case strings.Contains(topic, GossipContributionAndProofMessage):
        return defaultSyncContributionTopicParams()
        return defaultSyncContributionTopicParams(), nil
    case strings.Contains(topic, GossipExitMessage):
        return defaultVoluntaryExitTopicParams(), nil
    case strings.Contains(topic, GossipProposerSlashingMessage):
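Note: with the error return removed from these helpers (see the hunks below), a nil *pubsub.TopicScoreParams becomes the sole "skip scoring" signal, so callers of topicScoreParams need a nil check before applying parameters. A sketch of that caller-side contract, assuming the go-libp2p-pubsub Topic.SetScoreParams API:

    package p2p

    import pubsub "github.com/libp2p/go-libp2p-pubsub"

    // applyTopicScoring sketches the contract after this change: nil params
    // mean "leave the topic unscored", not an error.
    func applyTopicScoring(t *pubsub.Topic, params *pubsub.TopicScoreParams) error {
        if params == nil {
            return nil // scoring intentionally skipped
        }
        return t.SetScoreParams(params)
    }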
@@ -191,19 +191,19 @@ func defaultBlockTopicParams() *pubsub.TopicScoreParams {
    }
}

func defaultAggregateTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
func defaultAggregateTopicParams(activeValidators uint64) *pubsub.TopicScoreParams {
    // Determine the expected message rate for the particular gossip topic.
    aggPerSlot := aggregatorsPerSlot(activeValidators)
    firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
    if err != nil {
        log.Warnf("skipping initializing topic scoring: %v", err)
        return nil, nil
        return nil
    }
    firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
    meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
    if err != nil {
        log.Warnf("skipping initializing topic scoring: %v", err)
        return nil, nil
        return nil
    }
    meshWeight := -scoreByWeight(aggregateWeight, meshThreshold)
    meshCap := 4 * meshThreshold
@@ -230,22 +230,22 @@ func defaultAggregateTopicParams(activeValidators uint64) (*pubsub.TopicScorePar
        MeshFailurePenaltyDecay:        scoreDecay(1 * oneEpochDuration()),
        InvalidMessageDeliveriesWeight: -maxScore() / aggregateWeight,
        InvalidMessageDeliveriesDecay:  scoreDecay(invalidDecayPeriod),
    }, nil
    }
}

func defaultSyncContributionTopicParams() (*pubsub.TopicScoreParams, error) {
func defaultSyncContributionTopicParams() *pubsub.TopicScoreParams {
    // Determine the expected message rate for the particular gossip topic.
    aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee
    firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
    if err != nil {
        log.Warnf("skipping initializing topic scoring: %v", err)
        return nil, nil
        return nil
    }
    firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
    meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
    if err != nil {
        log.Warnf("skipping initializing topic scoring: %v", err)
        return nil, nil
        return nil
    }
    meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold)
    meshCap := 4 * meshThreshold
@@ -272,23 +272,23 @@ func defaultSyncContributionTopicParams() (*pubsub.TopicScoreParams, error) {
        MeshFailurePenaltyDecay:        scoreDecay(1 * oneEpochDuration()),
        InvalidMessageDeliveriesWeight: -maxScore() / syncContributionWeight,
        InvalidMessageDeliveriesDecay:  scoreDecay(invalidDecayPeriod),
    }, nil
    }
}

func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicScoreParams {
    subnetCount := params.BeaconNetworkConfig().AttestationSubnetCount
    // Get weight for each specific subnet.
    topicWeight := attestationTotalWeight / float64(subnetCount)
    subnetWeight := activeValidators / subnetCount
    if subnetWeight == 0 {
        log.Warn("Subnet weight is 0, skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    // Determine the amount of validators expected in a subnet in a single slot.
    numPerSlot := time.Duration(subnetWeight / uint64(params.BeaconConfig().SlotsPerEpoch))
    if numPerSlot == 0 {
        log.Warn("numPerSlot is 0, skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    comsPerSlot := committeeCountPerSlot(activeValidators)
    exceedsThreshold := comsPerSlot >= 2*subnetCount/uint64(params.BeaconConfig().SlotsPerEpoch)
@@ -301,20 +301,20 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicSc
    rate := numPerSlot * 2 / gossipSubD
    if rate == 0 {
        log.Warn("rate is 0, skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    // Determine expected first deliveries based on the message rate.
    firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
    if err != nil {
        log.Warnf("skipping initializing topic scoring: %v", err)
        return nil, nil
        return nil
    }
    firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
    // Determine expected mesh deliveries based on message rate applied with a dampening factor.
    meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(numPerSlot)/dampeningFactor)
    if err != nil {
        log.Warnf("skipping initializing topic scoring: %v", err)
        return nil, nil
        return nil
    }
    meshWeight := -scoreByWeight(topicWeight, meshThreshold)
    meshCap := 4 * meshThreshold
@@ -341,10 +341,10 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicSc
        MeshFailurePenaltyDecay:        scoreDecay(meshDecay * oneEpochDuration()),
        InvalidMessageDeliveriesWeight: -maxScore() / topicWeight,
        InvalidMessageDeliveriesDecay:  scoreDecay(invalidDecayPeriod),
    }, nil
    }
}

func defaultSyncSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
func defaultSyncSubnetTopicParams(activeValidators uint64) *pubsub.TopicScoreParams {
    subnetCount := params.BeaconConfig().SyncCommitteeSubnetCount
    // Get weight for each specific subnet.
    topicWeight := syncCommitteesTotalWeight / float64(subnetCount)
@@ -356,7 +356,7 @@ func defaultSyncSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScorePa
    subnetWeight := activeValidators / subnetCount
    if subnetWeight == 0 {
        log.Warn("Subnet weight is 0, skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    firstDecay := time.Duration(1)
    meshDecay := time.Duration(4)
@@ -364,20 +364,20 @@ func defaultSyncSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScorePa
    rate := subnetWeight * 2 / gossipSubD
    if rate == 0 {
        log.Warn("rate is 0, skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    // Determine expected first deliveries based on the message rate.
    firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
    if err != nil {
        log.WithError(err).Warn("Skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
    // Determine expected mesh deliveries based on message rate applied with a dampening factor.
    meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(subnetWeight)/dampeningFactor)
    if err != nil {
        log.WithError(err).Warn("Skipping initializing topic scoring")
        return nil, nil
        return nil
    }
    meshWeight := -scoreByWeight(topicWeight, meshThreshold)
    meshCap := 4 * meshThreshold
@@ -404,7 +404,7 @@ func defaultSyncSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScorePa
        MeshFailurePenaltyDecay:        scoreDecay(meshDecay * oneEpochDuration()),
        InvalidMessageDeliveriesWeight: -maxScore() / topicWeight,
        InvalidMessageDeliveriesDecay:  scoreDecay(invalidDecayPeriod),
    }, nil
    }
}

func defaultAttesterSlashingTopicParams() *pubsub.TopicScoreParams {
@@ -63,16 +63,14 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) {
    assert.Equal(t, int(params.BeaconConfig().MinGenesisActiveValidatorCount)+100, int(vals), "mainnet genesis active count isn't accurate")
}

func TestLoggingParameters(t *testing.T) {
func TestLoggingParameters(_ *testing.T) {
    logGossipParameters("testing", nil)
    logGossipParameters("testing", &pubsub.TopicScoreParams{})
    // Test out actual gossip parameters.
    logGossipParameters("testing", defaultBlockTopicParams())
    p, err := defaultAggregateSubnetTopicParams(10000)
    assert.NoError(t, err)
    p := defaultAggregateSubnetTopicParams(10000)
    logGossipParameters("testing", p)
    p, err = defaultAggregateTopicParams(10000)
    assert.NoError(t, err)
    p = defaultAggregateTopicParams(10000)
    logGossipParameters("testing", p)
    logGossipParameters("testing", defaultAttesterSlashingTopicParams())
    logGossipParameters("testing", defaultProposerSlashingTopicParams())
@@ -77,7 +77,7 @@ type PeerManager interface {
    ENR() *enr.Record
    DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
    RefreshENR()
    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error)
    AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}
@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
    if scorer.config.StalePeerRefreshInterval == 0 {
        scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
    }
    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit
    scorer.maxScore = 1.0
    if batchSize > 0 {
        totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
    if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
        return s.maxScore
    }
    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit
    if batchSize > 0 {
        processedBatches := float64(peerData.ProcessedBlocks / batchSize)
        score += processedBatches * s.config.ProcessedBatchWeight
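Note: the scorer counts full batches of processed blocks, and each batch adds ProcessedBatchWeight to the raw score. A toy recomputation of that formula under assumed parameter values (not Prysm defaults):

    package main

    import "fmt"

    func main() {
        const (
            batchSize            = 64   // stand-in for flags.Get().BlockBatchLimit
            processedBatchWeight = 0.05 // stand-in for config.ProcessedBatchWeight
        )
        processedBlocks := uint64(640)
        processedBatches := float64(processedBlocks / batchSize)
        score := processedBatches * processedBatchWeight
        fmt.Printf("%.2f\n", score) // 0.50
    }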
@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit
    tests := []struct {
        name   string
        update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
        },
    })
    scorer := peerStatuses.Scorers().BlockProviderScorer()
    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit
    r := rand.NewDeterministicGenerator()

    reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
}

func TestScorers_BlockProvider_Sorted(t *testing.T) {
    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit
    tests := []struct {
        name   string
        update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
func TestScorers_BlockProvider_MaxScore(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit

    tests := []struct {
        name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit
    format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"

    tests := []struct {
@@ -183,8 +183,16 @@ func (s *Service) loop(ctx context.Context) {
    for {
        select {
        case <-decayBadResponsesStats.C:
            // Exit early if context is canceled.
            if ctx.Err() != nil {
                return
            }
            s.scorers.badResponsesScorer.Decay()
        case <-decayBlockProviderStats.C:
            // Exit early if context is canceled.
            if ctx.Err() != nil {
                return
            }
            s.scorers.blockProviderScorer.Decay()
        case <-ctx.Done():
            return
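Note: when a ticker fires at the same moment the context is canceled, Go's select picks among ready cases at random; the added ctx.Err() checks make cancellation win. A minimal reproduction of the pattern:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func loop(ctx context.Context, tick *time.Ticker) {
        for {
            select {
            case <-tick.C:
                // select chooses randomly among ready cases, so re-check
                // cancellation before doing periodic work.
                if ctx.Err() != nil {
                    return
                }
                fmt.Println("decay pass")
            case <-ctx.Done():
                return
            }
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
        defer cancel()
        t := time.NewTicker(50 * time.Millisecond)
        defer t.Stop()
        loop(ctx, t)
    }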
@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit

    t.Run("default config", func(t *testing.T) {
        peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    batchSize := uint64(flags.Get().BlockBatchLimit)
    batchSize := flags.Get().BlockBatchLimit

    peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
        scores := make(map[string]float64, len(pids))