Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)
Compare commits
147 Commits
Commits included in this comparison (abbreviated SHA1):

4aa372a65f f89b7624cf b854b51e21 6832f79cf2 f3e6c83d66 82453961ee b1d3ea83bd
98ab265b6b 8e115672c5 4778ec7434 fac8526eb6 93fc666f83 b5bd461627 788338a004
8fecc5af7f 03e40edf2c 39c33b82ad 905e0f4c1c 0ade1f121d ee52f8dff3 50159c2e48
cae58bbbd8 9b37418761 a78cdf86cc 1c4ea75a18 6f4c80531c e3246922eb 5962363847
5e8cf9cd28 b5ca09bce6 720ee3f2a4 3d139d35f6 ea38969af2 f753ce81cc 9d678b0c47
4a4a7e97df 652b1617ed 42edc4f8dd 70d5bc448f d1159308c8 e01298bd08 2bcb62db28
672fb72a7f 7fbd5b06da 0fb91437fc 6e731bdedd 40eb718ba2 e1840f7523 b07e1ba7a4
233171d17c 2b0e132201 ad9e5331f5 341a2f1ea3 7974fe01cd ae56f643eb 2ea09b621e
40fedee137 7cdddcb015 4440ac199f d78428c49e 2e45fada34 4c18d291f4 dfe33b0770
7a2b8e4e6a 63308239d9 712cc18ee0 3d318cffa2 e13cdf493e fdd9c535b4 ba728d4929
cd5eb0a2ef 6d44428e9c cefb5cec55 815debee38 fc6c17cc75 df0e9fa3d7 0287bc65c7
35c3225579 f238f872a1 24f105b804 f98354f59f 2aea4e49f4 a8e8338973 ed78d15ed6
d5cf0a2e54 b89bb3fa30 be722f2c5c 09c99b25bc 24ff40fbf5 d96491ffa9 5121a50bb4
1c6d914ea1 71adada879 5790aa66e0 8e6bb39d2f ad06914f45 259e07d5c9 f6cf77acd8
40a36fb02d 2e65be12b8 4a237e11bc 026207fc16 6b7b30ce47 105bb70b5e 9564ab1f7f
0b09e3e955 bb319e02e8 53c86429e4 61172d5007 1507719613 4c677e7b40 28f50862cb
2dfb0696f7 ae2c883aaf ad9ef9d803 b837f90b35 4d9eafe110 7b1b9a564b 341cced53f
7f3ec4221f 5b3375638a 3c721418db a3ad254b78 d7cad27cc3 290b4273dd a797a7aaac
f7c34b0fd6 c6874e33f7 13ddc171eb acde184aa7 5fd6474e56 a7fc25f2e0 65db331eaf
7b8aedbfe4 cf956c718d 975f0ea1af 9551a6c4b8 d39113af60 d27334746b d00c7a0ce8
c7826856a5 1bec9ae9e6 17ed9356ff c493290027 30c07a8a1a b7fb8a8dcd bd108c3244
.github/workflows/go.yml (vendored): 2 changed lines
@@ -39,7 +39,7 @@ jobs:
      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
-         args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode --skip-files=validator/web/site_data.go
+         args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go

  build:
    name: Build
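The two linters enabled above, errcheck and gosimple, flag unchecked error returns and needlessly verbose constructs respectively. The Go snippet below is a hypothetical illustration of the kind of code each one reports; it is not taken from the Prysm codebase.

```go
package example

import "os"

func cleanup(path string) bool {
	// errcheck: os.Remove returns an error that is silently dropped here.
	os.Remove(path)

	// gosimple (S1002): comparing a boolean against true is redundant;
	// `if ok { ... }` is the simplified form the linter suggests.
	ok := path != ""
	if ok == true {
		return true
	}
	return false
}
```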
@@ -5,21 +5,22 @@ Contact: mailto:security@prysmaticlabs.com
|
||||
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
|
||||
Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
|
||||
Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
|
||||
Encryption: openpgp4fpr:79C59A585E3FD3AFFA00F5C22940A6479DA7C9EC
|
||||
Preferred-Languages: en
|
||||
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt
|
||||
-----BEGIN PGP SIGNATURE-----
|
||||
|
||||
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl++klgACgkQcuM+TfGl
|
||||
A27rQw/6A29p1W20J0v+h218p8XWLSUpTIGLnZTxw6KqdyVXMzlsQK0YG4G2s2AB
|
||||
0LKh7Ae/Di5E0U+Z4AjUW5nc5eaCxK36GMscH9Ah0rgJwNYxEJw7/2o8ZqVT/Ip2
|
||||
+56rFihRqxFZfaCNKFVuZFaL9jKewV9FKYP38ID6/SnTcrOHiu2AoAlyZGmB03p+
|
||||
iT57SPRHatygeY4xb/gwcfREFWEv+VHGyBTv8A+6ABZDxyurboCFMERHzFICrbmk
|
||||
8UdHxxlWZDnHAbAUyAwpERC5znx6IHXQJwF8TMtu6XY6a6axT2XBOyJDF9/mZOz+
|
||||
kdkz6loX5uxaQBGLtTv6Kqf1yUGANOZ16VhHvWwL209LmHmigIVQ+qSM6c79PsW/
|
||||
vrsqdz3GBsiMC5Fq2vYgnbgzpfE8Atjn0y7E+j4R7IvwOAE/Ro/b++nqnc4YqhME
|
||||
P/yTcfGftaCrdSNnQCXeoV9JxpFM5Xy8KV3eexvNKbcgA/9DtgxL5i+s5ZJkUT9A
|
||||
+qJvoRrRyIym32ghkHgtFJKB3PLCdobeoOVRk6EnMo9zKSiSK2rZEJW8Ccbo515D
|
||||
W9qUOn3GF7lNVuUFAU/YKEdmDp/AVaViZ7vH+8aq0LC0HBkZ8XlzWnWoArS8sMhw
|
||||
fX0R9g/HMgrwNte/d0mwim5lJ2Plgv60Bh4grJqwZJeWbU0zi1U=
|
||||
=uW+X
|
||||
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAmGOfiYACgkQcuM+TfGl
|
||||
A24YwRAAiQk3w6yzqSEggrOlNoNn04iu/rWZdn5ihkQgzACXy8XH2D1gdKLChE/X
|
||||
7e5bUtgE2aCuHryQjwoKxqZakviBJFstVmHgF64rXv2zKhpqA30Mj4fI+T3zn8I+
|
||||
+FpFV0TTsxNLDx+AcR1eQ1nSayO7ImUDIfOQNDDnSZZy42Bc+F+QIGKB3aH/8bpG
|
||||
kT+bDTZrXvX+TE1gZTbAtZG8sH8g/zadoWEHIhfXUuYb0kTz+DRzAxoqU4j4Z4ee
|
||||
1zSfFAgfJwxJP4kWD7s4xkE1sBbCgGBeD6cW/C2lbcfIei+XSizLpHW3jD9dNqh4
|
||||
fLkmEspSa/LV/iXFq8nFzu/GLww4q+sQZDzzDKZyws54CrATinRitZMhzoIL0bTn
|
||||
yFZVOGHosFAMEVZ36dl1Aw2+B2W6tr2CVr9c5zfV+kup5/KZH1EmT5nYY/zFwfg2
|
||||
jYCFB5wmYeiyWZvuprgJXRArgVZLZaJxwWazlPVk4i/4vPvRgvfHqOwHCBe8DXy0
|
||||
VHPhpewwb/ECYek1KoaNQflgR8iH2GMHkC5RjhGDAt1S0AQDtite5m4ZYt1kvO9E
|
||||
k/znkv89dduhL9CKDvZvnI+DICwsTrf//4KJ8PM/qaPAJa4GvtiUU/eS/jKBivtv
|
||||
OP5dZQtX6KPc9ewqqZgn622uHSezoBidgeTkdZsJ6tw2eIu0lsY=
|
||||
=V7L0
|
||||
-----END PGP SIGNATURE-----
|
||||
|
||||
@@ -144,12 +144,6 @@ common_files = {
     "//:README.md": "README.md",
 }

-toolchain(
-    name = "built_cmake_toolchain",
-    toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
-    toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
-)
-
 string_setting(
     name = "gotags",
     build_setting_default = "",
WORKSPACE: 18 changed lines
@@ -225,7 +225,7 @@ filegroup(
     url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )

-consensus_spec_version = "v1.1.2"
+consensus_spec_version = "v1.1.5"

 bls_test_version = "v0.1.1"

@@ -241,7 +241,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "45f6bd1688a57fd40ab1272dac75de33d9777c84f9ea545815bfd9dc0e841a82",
+    sha256 = "a7d7173d953494c0dfde432c9fc064c25d46d666b024749b3474ae0cdfc50050",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -257,7 +257,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "e203d4378a59d9b82b1a4e948bef7281be8bb5b59264c7d1a6a394b669cf800b",
+    sha256 = "f86872061588c0197516b23025d39e9365b4716c112218a618739dc0d6f4666a",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -273,7 +273,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "dd9e01bb871673af49a6b4efad291505e0bdb607df3dd29d1b0b319c1fba2d60",
+    sha256 = "7a06975360fd37fbb4694d0e06abb78d2a0835146c1d9b26d33569edff8b98f0",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )

@@ -288,7 +288,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "55fa6fa9823781195cb5362b734b4b6e2a6940a7411530b9bcdab02d845106ca",
+    sha256 = "87d8089200163340484d61212fbdffbb5d9d03e1244622761dcb91e641a65761",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )

@@ -362,9 +362,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
+    sha256 = "0a3d94428ea28916276694c517b82b364122063fdbf924f54ee9ae0bc500289f",
     urls = [
-        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
+        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.1/prysm-web-ui.tar.gz",
     ],
 )

@@ -395,10 +395,6 @@ load(

 _cc_image_repos()

 load("@com_github_ethereum_go_ethereum//:deps.bzl", "geth_dependencies")

 geth_dependencies()

-load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
-
-go_embed_data_dependencies()
-
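Every external archive above is pinned by a SHA-256 digest, so bumping consensus_spec_version from v1.1.2 to v1.1.5 also means recomputing the digest of each newly released tarball. A minimal Go sketch of producing such a digest (the URL is only an example release asset; any tarball works the same way):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Example asset; substitute the tarball whose digest WORKSPACE should pin.
	url := "https://github.com/ethereum/consensus-spec-tests/releases/download/v1.1.5/general.tar.gz"

	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	h := sha256.New()
	if _, err := io.Copy(h, resp.Body); err != nil {
		log.Fatal(err)
	}
	// Hex digest in the form expected by the sha256 = "..." attributes above.
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```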
beacon-chain/blockchain/BUILD.bazel
@@ -10,6 +10,7 @@ go_library(
         "init_sync_process_block.go",
         "log.go",
         "metrics.go",
+        "options.go",
         "process_attestation.go",
         "process_attestation_helpers.go",
         "process_block.go",
@@ -17,11 +18,13 @@ go_library(
         "receive_attestation.go",
         "receive_block.go",
         "service.go",
+        "state_balance_cache.go",
         "weak_subjectivity_checks.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
     visibility = [
         "//beacon-chain:__subpackages__",
+        "//cmd/beacon-chain:__subpackages__",
         "//testing/fuzz:__pkg__",
         "//testing/slasher/simulator:__pkg__",
     ],
@@ -93,6 +96,7 @@ go_test(
         "init_test.go",
         "log_test.go",
         "metrics_test.go",
+        "mock_test.go",
         "process_attestation_test.go",
         "process_block_test.go",
         "receive_attestation_test.go",
@@ -139,6 +143,7 @@ go_test(
         "chain_info_norace_test.go",
         "checktags_test.go",
         "init_test.go",
+        "mock_test.go",
         "receive_block_test.go",
         "service_norace_test.go",
     ],
@@ -14,7 +14,7 @@ import (
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB},
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -25,7 +25,7 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
go func() {
|
||||
@@ -38,7 +38,7 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(ðpb.SignedBeaconBlock{})},
|
||||
}
|
||||
go func() {
|
||||
@@ -51,7 +51,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
|
||||
@@ -120,7 +120,7 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
|
||||
func TestHeadRoot_UseDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := &Service{cfg: &Config{BeaconDB: beaconDB}}
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
@@ -278,14 +278,14 @@ func TestService_HeadGenesisValidatorRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_ProtoArrayStore(t *testing.T) {
|
||||
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
p := c.ProtoArrayStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
}
|
||||
|
||||
func TestService_ChainHeads(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
@@ -42,9 +41,6 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
// ensure head gets its best justified info.
|
||||
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = s.bestJustifiedCheckpt
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Get head from the fork choice service.
|
||||
@@ -107,8 +103,8 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if newHeadBlock == nil || newHeadBlock.IsNil() || newHeadBlock.Block().IsNil() {
|
||||
return errors.New("cannot save nil head block")
|
||||
if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the new head state from cached state or DB.
|
||||
@@ -175,7 +171,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
|
||||
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return err
|
||||
}
|
||||
cachedHeadRoot, err := s.HeadRoot(ctx)
|
||||
@@ -273,57 +269,6 @@ func (s *Service) hasHeadState() bool {
|
||||
return s.head != nil && s.head.state != nil
|
||||
}
|
||||
|
||||
// This caches justified state balances to be used for fork choice.
|
||||
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
var justifiedState state.BeaconState
|
||||
var err error
|
||||
if justifiedRoot == s.genesisRoot {
|
||||
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if justifiedState == nil || justifiedState.IsNil() {
|
||||
return errors.New("justified state can't be nil")
|
||||
}
|
||||
|
||||
epoch := time.CurrentEpoch(justifiedState)
|
||||
|
||||
justifiedBalances := make([]uint64, justifiedState.NumValidators())
|
||||
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
|
||||
justifiedBalances[idx] = val.EffectiveBalance()
|
||||
} else {
|
||||
justifiedBalances[idx] = 0
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.justifiedBalancesLock.Lock()
|
||||
defer s.justifiedBalancesLock.Unlock()
|
||||
s.justifiedBalances = justifiedBalances
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) getJustifiedBalances() []uint64 {
|
||||
s.justifiedBalancesLock.RLock()
|
||||
defer s.justifiedBalancesLock.RUnlock()
|
||||
return s.justifiedBalances
|
||||
}
|
||||
|
||||
// Notifies a common event feed of a new chain head event. Called right after a new
|
||||
// chain head is determined, set, and saved to disk.
|
||||
func (s *Service) notifyNewHeadEvent(
|
||||
|
||||
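The hunks above remove the service's cacheJustifiedStateBalances and getJustifiedBalances helpers; the rest of the diff replaces calls to them with s.justifiedBalances.get(ctx, root), backed by a new stateBalanceCache (state_balance_cache.go is added to BUILD.bazel above, and hit/miss counters appear in the metrics.go hunk further down). The cache implementation itself is not part of this compare view, so the Go sketch below is only a plausible reconstruction: the root/balances fields and the locking are assumptions, while the stateByRooter dependency, the counter names, and the active-balance rule are taken from the surrounding hunks.

```go
// Hypothetical sketch of state_balance_cache.go; the real file is not shown in this diff.
type stateBalanceCache struct {
	sync.Mutex
	stateGen stateByRooter // stategen.State in production, mockStateByRooter in tests
	root     [32]byte      // assumed field: root the cached balances were computed for
	balances []uint64      // assumed field: cached justified balances
}

func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
	c.Lock()
	defer c.Unlock()
	if justifiedRoot == c.root && c.balances != nil {
		stateBalanceCacheHit.Inc()
		return c.balances, nil
	}
	stateBalanceCacheMiss.Inc()

	st, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
	if err != nil {
		return nil, err
	}
	// Same rule the removed cacheJustifiedStateBalances used: active validators
	// contribute their effective balance, everyone else contributes zero.
	epoch := time.CurrentEpoch(st)
	balances := make([]uint64, st.NumValidators())
	if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
		if helpers.IsActiveValidatorUsingTrie(val, epoch) {
			balances[idx] = val.EffectiveBalance()
		}
		return nil
	}); err != nil {
		return nil, err
	}
	c.root, c.balances = justifiedRoot, balances
	return balances, nil
}
```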
@@ -18,7 +18,7 @@ import (
|
||||
func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
c := &Service{
|
||||
cfg: &Config{
|
||||
cfg: &config{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
},
|
||||
}
|
||||
@@ -36,7 +36,7 @@ func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
|
||||
func TestService_HeadDomainFetcher_Errors(t *testing.T) {
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
c := &Service{
|
||||
cfg: &Config{
|
||||
cfg: &config{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -123,13 +123,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
state, _ := util.DeterministicGenesisState(t, 100)
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
||||
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
|
||||
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
|
||||
balances, err := service.justifiedBalances.get(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
|
||||
}
|
||||
|
||||
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
@@ -153,7 +155,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
srv := &Service{
|
||||
cfg: &Config{
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: [32]byte{1},
|
||||
@@ -182,7 +184,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
genesisRoot := [32]byte{1}
|
||||
srv := &Service{
|
||||
cfg: &Config{
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: genesisRoot,
|
||||
|
||||
@@ -25,16 +25,17 @@ func TestService_TreeHandler(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
[32]byte{'a'},
|
||||
),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
fcs := protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
[32]byte{'a'},
|
||||
)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
|
||||
|
||||
@@ -130,6 +130,14 @@ var (
|
||||
Name: "sync_head_state_hit",
|
||||
Help: "The number of sync head state requests that are present in the cache.",
|
||||
})
|
||||
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "state_balance_cache_hit",
|
||||
Help: "Count the number of state balance cache hits.",
|
||||
})
|
||||
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "state_balance_cache_miss",
|
||||
Help: "Count the number of state balance cache hits.",
|
||||
})
|
||||
)
|
||||
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
|
||||
beacon-chain/blockchain/mock_test.go (new file): 50 lines
@@ -0,0 +1,50 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
)
|
||||
|
||||
func testServiceOptsWithDB(t *testing.T) []Option {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
return []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
}
|
||||
|
||||
// warning: only use these opts when you are certain there are no db calls
|
||||
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
|
||||
// initialization requirements w/o the overhead of db init.
|
||||
func testServiceOptsNoDB() []Option {
|
||||
return []Option{
|
||||
withStateBalanceCache(satisfactoryStateBalanceCache()),
|
||||
}
|
||||
}
|
||||
|
||||
type mockStateByRooter struct {
|
||||
state state.BeaconState
|
||||
err error
|
||||
}
|
||||
|
||||
var _ stateByRooter = &mockStateByRooter{}
|
||||
|
||||
func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
|
||||
return m.state, m.err
|
||||
}
|
||||
|
||||
// returns an instance of the state balance cache that can be used
|
||||
// to satisfy the requirement for one in NewService, but which will
|
||||
// always return an error if used.
|
||||
func satisfactoryStateBalanceCache() *stateBalanceCache {
|
||||
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
|
||||
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
|
||||
}
|
||||
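Together with the exported constructors in options.go (next file), these helpers replace the &Config{...} literals that the test hunks elsewhere in this compare view are converting. A small hypothetical test in the same package, sketched from those conversions (the test name is illustrative, and the require import path follows Prysm's testing packages; adjust if your checkout differs):

```go
package blockchain

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/testing/require"
)

// Hypothetical example mirroring the test conversions in this compare view.
func TestNewService_OptionWiring(t *testing.T) {
	ctx := context.Background()

	// No DB round-trips below, so the lightweight helper is safe here.
	opts := testServiceOptsNoDB()
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	_ = service.CurrentSlot()

	// When the code under test touches the database or stategen, use the
	// DB-backed helper instead:
	//   opts := testServiceOptsWithDB(t)
	//   service, err := NewService(ctx, opts...)
}
```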
beacon-chain/blockchain/options.go (new file): 146 lines
@@ -0,0 +1,146 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type Option func(s *Service) error
|
||||
|
||||
// WithMaxGoroutines to control resource use of the blockchain service.
|
||||
func WithMaxGoroutines(x int) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.MaxRoutines = x
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithWeakSubjectivityCheckpoint for checkpoint sync.
|
||||
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.WeakSubjectivityCheckpt = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDatabase for head access.
|
||||
func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.BeaconDB = beaconDB
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithChainStartFetcher to retrieve information about genesis.
|
||||
func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ChainStartFetcher = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDepositCache for deposit lifecycle after chain inclusion.
|
||||
func WithDepositCache(c *depositcache.DepositCache) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.DepositCache = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAttestationPool for attestation lifecycle after chain inclusion.
|
||||
func WithAttestationPool(p attestations.Pool) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.AttPool = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithExitPool for exits lifecycle after chain inclusion.
|
||||
func WithExitPool(p voluntaryexits.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ExitPool = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSlashingPool for slashings lifecycle after chain inclusion.
|
||||
func WithSlashingPool(p slashings.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.SlashingPool = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithStateNotifier to notify an event feed of state processing.
|
||||
func WithStateNotifier(n statefeed.Notifier) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.StateNotifier = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithForkChoiceStore to update an optimized fork-choice representation.
|
||||
func WithForkChoiceStore(f forkchoice.ForkChoicer) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ForkChoiceStore = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAttestationService for dealing with attestation lifecycles.
|
||||
func WithAttestationService(srv *attestations.Service) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.AttService = srv
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithStateGen for managing state regeneration and replay.
|
||||
func WithStateGen(g *stategen.State) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.StateGen = g
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSlasherAttestationsFeed to forward attestations into slasher if enabled.
|
||||
func WithSlasherAttestationsFeed(f *event.Feed) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.SlasherAttestationsFeed = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func withStateBalanceCache(c *stateBalanceCache) Option {
|
||||
return func(s *Service) error {
|
||||
s.justifiedBalances = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithFinalizedStateAtStartUp to store finalized state at start up.
|
||||
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.FinalizedStateAtStartUp = st
|
||||
return nil
|
||||
}
|
||||
}
|
||||
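options.go is the production-side counterpart of those test helpers: callers now assemble the blockchain service from exported Option values instead of filling in a Config struct. A hedged, caller-side sketch of that wiring (the real registration lives in cmd/beacon-chain and is not part of this diff; imports are omitted, every argument is a placeholder the caller would construct, and the goroutine limit is an arbitrary example value):

```go
// Hypothetical caller-side wiring; beaconDB, depositCache, fcs and notifier
// are assumed to be constructed elsewhere by the node.
func newChainService(ctx context.Context, beaconDB db.HeadAccessDatabase,
	depositCache *depositcache.DepositCache, fcs forkchoice.ForkChoicer,
	notifier statefeed.Notifier) (*blockchain.Service, error) {

	opts := []blockchain.Option{
		blockchain.WithDatabase(beaconDB),
		blockchain.WithDepositCache(depositCache),
		blockchain.WithStateGen(stategen.New(beaconDB)),
		blockchain.WithForkChoiceStore(fcs),
		blockchain.WithStateNotifier(notifier),
		blockchain.WithMaxGoroutines(20),
	}
	return blockchain.NewService(ctx, opts...)
}
```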
@@ -87,7 +87,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
|
||||
if (b == nil || b.IsNil()) && s.hasInitSyncBlock(r) {
|
||||
b = s.getInitSyncBlock(r)
|
||||
}
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.Block().Slot() > data.Slot {
|
||||
|
||||
@@ -24,12 +24,12 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
||||
@@ -130,12 +130,13 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
@@ -155,11 +156,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
@@ -227,11 +228,11 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
epoch := types.Epoch(1)
|
||||
@@ -264,10 +265,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
@@ -276,10 +276,9 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
@@ -288,10 +287,9 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
@@ -301,10 +299,8 @@ func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
@@ -313,10 +309,9 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
@@ -331,10 +326,9 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
@@ -351,8 +345,13 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
@@ -376,10 +375,9 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
|
||||
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
@@ -403,10 +401,9 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
|
||||
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
|
||||
@@ -88,9 +88,8 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
|
||||
func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
return errors.New("nil block")
|
||||
if err := helpers.BeaconBlockIsNil(signed); err != nil {
|
||||
return err
|
||||
}
|
||||
b := signed.Block()
|
||||
|
||||
@@ -152,7 +151,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint()
|
||||
}
|
||||
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root)
|
||||
return errors.Wrap(err, msg)
|
||||
}
|
||||
if err := s.updateHead(ctx, balances); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
|
||||
@@ -237,8 +241,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
return nil, nil, errors.New("no blocks provided")
|
||||
}
|
||||
if blks[0] == nil || blks[0].IsNil() || blks[0].Block().IsNil() {
|
||||
return nil, nil, errors.New("nil block")
|
||||
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
b := blks[0].Block()
|
||||
|
||||
|
||||
@@ -135,7 +135,24 @@ func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
|
||||
// shouldUpdateCurrentJustified prevents a bouncing attack by only updating conflicting justified
|
||||
// checkpoints in the fork choice if in the early slots of the epoch.
|
||||
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
|
||||
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
|
||||
//
|
||||
// Spec code:
|
||||
// def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
|
||||
// """
|
||||
// To address the bouncing attack, only update conflicting justified
|
||||
// checkpoints in the fork choice if in the early slots of the epoch.
|
||||
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
|
||||
//
|
||||
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
|
||||
// """
|
||||
// if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
|
||||
// return True
|
||||
//
|
||||
// justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
|
||||
// if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
|
||||
// return False
|
||||
//
|
||||
// return True
|
||||
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.shouldUpdateCurrentJustified")
|
||||
defer span.End()
|
||||
@@ -143,51 +160,20 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
|
||||
if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
return true, nil
|
||||
}
|
||||
var newJustifiedBlockSigned block.SignedBeaconBlock
|
||||
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
|
||||
var err error
|
||||
if s.hasInitSyncBlock(justifiedRoot) {
|
||||
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
|
||||
} else {
|
||||
newJustifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.IsNil() || newJustifiedBlockSigned.Block().IsNil() {
|
||||
return false, errors.New("nil new justified block")
|
||||
}
|
||||
|
||||
newJustifiedBlock := newJustifiedBlockSigned.Block()
|
||||
jSlot, err := slots.EpochStart(s.justifiedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newJustifiedBlock.Slot() <= jSlot {
|
||||
return false, nil
|
||||
}
|
||||
var justifiedBlockSigned block.SignedBeaconBlock
|
||||
cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if s.hasInitSyncBlock(cachedJustifiedRoot) {
|
||||
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
|
||||
} else {
|
||||
justifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, cachedJustifiedRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
if justifiedBlockSigned == nil || justifiedBlockSigned.IsNil() || justifiedBlockSigned.Block().IsNil() {
|
||||
return false, errors.New("nil justified block")
|
||||
}
|
||||
justifiedBlock := justifiedBlockSigned.Block()
|
||||
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot())
|
||||
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
|
||||
b, err := s.ancestor(ctx, justifiedRoot[:], jSlot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !bytes.Equal(b, s.justifiedCheckpt.Root) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -207,9 +193,6 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
|
||||
if canUpdate {
|
||||
s.prevJustifiedCheckpt = s.justifiedCheckpt
|
||||
s.justifiedCheckpt = cpt
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -220,12 +203,13 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
|
||||
// This method does not defend against the fork choice bouncing attack, which is why it is only recommended for use during initial syncing.
|
||||
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
|
||||
s.prevJustifiedCheckpt = s.justifiedCheckpt
|
||||
s.justifiedCheckpt = cp
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
|
||||
return err
|
||||
}
|
||||
s.justifiedCheckpt = cp
|
||||
|
||||
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
|
||||
@@ -344,7 +328,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
|
||||
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
|
||||
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
|
||||
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
// we don't need to check if the previous justified checkpoint was an ancestor since the new
|
||||
// finalized checkpoint is overriding it.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update justified if store justified is not in chain with finalized check point.
|
||||
@@ -359,9 +345,6 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
|
||||
}
|
||||
if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -35,14 +35,16 @@ import (
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -133,11 +135,11 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
@@ -184,12 +186,11 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
@@ -218,13 +219,13 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
|
||||
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
|
||||
lastJustifiedBlk := util.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
|
||||
@@ -249,11 +250,11 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
@@ -282,11 +283,11 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
@@ -319,8 +320,12 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedBlock := util.NewBeaconBlock()
|
||||
@@ -352,8 +357,11 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)}
|
||||
@@ -390,8 +398,11 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)}
|
||||
@@ -431,8 +442,11 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
// Set finalized epoch to 1.
|
||||
@@ -574,7 +588,8 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
|
||||
}
|
||||
func TestAncestorByDB_CtxErr(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
service, err := NewService(ctx, &Config{})
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
cancel()
|
||||
@@ -586,8 +601,13 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
@@ -629,8 +649,8 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
|
||||
func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
@@ -668,8 +688,13 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
@@ -705,8 +730,8 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
|
||||
func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.genesisRoot = [32]byte{'a'}
|
||||
|
||||
@@ -719,6 +744,12 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
|
||||
func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
ctx := context.Background()
|
||||
type args struct {
|
||||
cachedCheckPoint *ethpb.Checkpoint
|
||||
@@ -760,7 +791,7 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
|
||||
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.justifiedCheckpt = test.args.cachedCheckPoint
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
|
||||
@@ -798,6 +829,12 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
@@ -852,7 +889,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Root: tt.args.finalizedRoot[:],
|
||||
@@ -867,10 +904,9 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gBlk := util.NewBeaconBlock()
|
||||
@@ -897,8 +933,8 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
|
||||
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
@@ -911,8 +947,8 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
|
||||
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, _ := util.DeterministicGenesisState(t, 1024)
|
||||
@@ -925,16 +961,17 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
@@ -969,16 +1006,11 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
|
||||
func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts = append(opts, WithDepositCache(depositCache))
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
|
||||
@@ -131,7 +131,13 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
				continue
			}
			s.processAttestations(s.ctx)
			if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {

			balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
			if err != nil {
				log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
				continue
			}
			if err := s.updateHead(s.ctx, balances); err != nil {
				log.Warnf("Resolving fork due to new attestation: %v", err)
			}
		}

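Condensed, the routine's new head-update sequence is: resolve the current justified root, ask the balance cache for that root's active-validator balances, and only then run fork choice. Restated without the logging noise (this is a paraphrase of the lines above, not a separate API):

	root := bytesutil.ToBytes32(s.justifiedCheckpt.Root)
	balances, err := s.justifiedBalances.get(s.ctx, root)
	if err == nil {
		// Fork choice only runs when balances for the justified root are available.
		if err := s.updateHead(s.ctx, balances); err != nil {
			log.Warnf("Resolving fork due to new attestation: %v", err)
		}
	}
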
@@ -9,9 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -42,10 +40,9 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
@@ -70,10 +67,9 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
@@ -99,17 +95,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
func TestProcessAttestations_Ok(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
opts = append(opts, WithAttestationPool(attestations.NewPool()))
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
|
||||
@@ -101,7 +101,10 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
		reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
	}

	if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
	if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
		return err
	}
	if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
		// log.Fatalf will prevent defer from being called
		span.End()
		// Exit run time if the node failed to verify weak subjectivity checkpoint.

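Ordering is the point of this hunk: the verifier reads blocks back out of the database, so ReceiveBlockBatch flushes its cached init-sync blocks before running the check. A rough restatement of the new flow (the tail of the error branch is cut off in this excerpt, so the exact fatal log call is an assumption):

	if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
		return err // persist cached init-sync blocks so the verifier can see them
	}
	if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
		// log.Fatalf would prevent deferred calls from running, so end the span explicitly.
		span.End()
		log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err) // assumed; not shown above
	}
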
@@ -124,19 +124,15 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
@@ -165,19 +161,16 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
@@ -248,17 +241,13 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
@@ -284,7 +273,9 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
opts := testServiceOptsNoDB()
|
||||
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
@@ -297,9 +288,9 @@ func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
@@ -310,9 +301,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
@@ -323,9 +314,9 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Epoch: 10000000}
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -45,7 +45,7 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
	cfg *Config
	cfg *config
	ctx context.Context
	cancel context.CancelFunc
	genesisTime time.Time
@@ -62,13 +62,13 @@ type Service struct {
	checkpointStateCache *cache.CheckpointStateCache
	initSyncBlocks map[[32]byte]block.SignedBeaconBlock
	initSyncBlocksLock sync.RWMutex
	justifiedBalances []uint64
	justifiedBalancesLock sync.RWMutex
	wsVerified bool
	//justifiedBalances []uint64
	justifiedBalances *stateBalanceCache
	wsVerifier *WeakSubjectivityVerifier
}

// Config options for the service.
type Config struct {
// config options for the service.
type config struct {
	BeaconBlockBuf int
	ChainStartFetcher powchain.ChainStartFetcher
	BeaconDB db.HeadAccessDatabase
@@ -84,53 +84,43 @@ type Config struct {
	StateGen *stategen.State
	SlasherAttestationsFeed *event.Feed
	WeakSubjectivityCheckpt *ethpb.Checkpoint
	FinalizedStateAtStartUp state.BeaconState
}

// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	return &Service{
		cfg: cfg,
	srv := &Service{
		ctx: ctx,
		cancel: cancel,
		boundaryRoots: [][32]byte{},
		checkpointStateCache: cache.NewCheckpointStateCache(),
		initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
		justifiedBalances: make([]uint64, 0),
	}, nil
		cfg: &config{},
	}
	for _, opt := range opts {
		if err := opt(srv); err != nil {
			return nil, err
		}
	}
	var err error
	if srv.justifiedBalances == nil {
		srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
		if err != nil {
			return nil, err
		}
	}
	srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
	if err != nil {
		return nil, err
	}
	return srv, nil
}

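With this constructor, callers assemble the service from functional options instead of filling in a Config literal. A sketch of a minimal call site, using only option names that appear elsewhere in this diff (a production node wires in considerably more):

	fcs := protoarray.New(0, 0, [32]byte{})
	srv, err := NewService(ctx,
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),
		WithForkChoiceStore(fcs),
		WithStateNotifier(notifier), // any state feed notifier implementation
	)
	if err != nil {
		return err
	}

Because the options run before the fallback in NewService, a test can presumably inject its own justifiedBalances cache; otherwise the constructor builds one from the configured StateGen.
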
// Start a blockchain service's main event loop.
func (s *Service) Start() {
	// For running initial sync with state cache, in an event of restart, we use
	// last finalized check point as start point to sync instead of head
	// state. This is because we no longer save state every slot during sync.
	cp, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
	if err != nil {
		log.Fatalf("Could not fetch finalized cp: %v", err)
	}

	r := bytesutil.ToBytes32(cp.Root)
	// Before the first finalized epoch, in the current epoch,
	// the finalized root is defined as zero hashes instead of genesis root hash.
	// We want to use genesis root to retrieve for state.
	if r == params.BeaconConfig().ZeroHash {
		genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
		if err != nil {
			log.Fatalf("Could not fetch finalized cp: %v", err)
		}
		if genesisBlock != nil && !genesisBlock.IsNil() {
			r, err = genesisBlock.Block().HashTreeRoot()
			if err != nil {
				log.Fatalf("Could not tree hash genesis block: %v", err)
			}
		}
	}
	beaconState, err := s.cfg.StateGen.StateByRoot(s.ctx, r)
	if err != nil {
		log.Fatalf("Could not fetch beacon state by root: %v", err)
	}
	beaconState := s.cfg.FinalizedStateAtStartUp

	// Make sure that attestation processor is subscribed and ready for state initializing event.
	attestationProcessorSubscribed := make(chan struct{}, 1)
@@ -166,9 +156,6 @@ func (s *Service) Start() {

	// Resume fork choice.
	s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
	if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
		log.Fatalf("Could not cache justified state balances: %v", err)
	}
	s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
	s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
	s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
@@ -190,9 +177,11 @@ func (s *Service) Start() {
	}
	}

	if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
	// not attempting to save initial sync blocks here, because there shouldn't be until
	// after the statefeed.Initialized event is fired (below)
	if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
		// Exit run time if the node failed to verify weak subjectivity checkpoint.
		log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
		log.Fatalf("could not verify initial checkpoint provided for chain sync, with err=: %v", err)
	}

	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
@@ -353,9 +342,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
	genesisCheckpoint := genesisState.FinalizedCheckpoint()

	s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
	if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
		return err
	}
	s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
	s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
	s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
@@ -381,8 +367,8 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
	if err != nil {
		return errors.Wrap(err, "could not get genesis block from db")
	}
	if genesisBlock == nil || genesisBlock.IsNil() {
		return errors.New("no genesis block in db")
	if err := helpers.BeaconBlockIsNil(genesisBlock); err != nil {
		return err
	}
	genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
	if err != nil {
@@ -402,7 +388,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
	finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
	var finalizedState state.BeaconState

	finalizedState, err = s.cfg.StateGen.Resume(ctx)
	finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
	if err != nil {
		return errors.Wrap(err, "could not get finalized state from db")
	}
@@ -424,7 +410,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
	if err != nil {
		return errors.Wrap(err, "could not hash head block")
	}
	finalizedState, err := s.cfg.StateGen.Resume(ctx)
	finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
	if err != nil {
		return errors.Wrap(err, "could not get finalized state from db")
	}

@@ -18,7 +18,7 @@ func init() {
|
||||
func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB},
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
|
||||
@@ -94,11 +94,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
DepositContainers: []*ethpb.DepositContainer{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
|
||||
BeaconDB: beaconDB,
|
||||
HttpEndpoints: []string{endpoint},
|
||||
DepositContract: common.Address{},
|
||||
})
|
||||
web3Service, err = powchain.NewService(
|
||||
ctx,
|
||||
powchain.WithDatabase(beaconDB),
|
||||
powchain.WithHttpEndpoints([]string{endpoint}),
|
||||
powchain.WithDepositContractAddress(common.Address{}),
|
||||
)
|
||||
require.NoError(t, err, "Unable to set up web3 service")
|
||||
|
||||
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
@@ -107,23 +108,23 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconBlockBuf: 0,
|
||||
BeaconDB: beaconDB,
|
||||
DepositCache: depositCache,
|
||||
ChainStartFetcher: web3Service,
|
||||
P2p: &mockBroadcaster{},
|
||||
StateNotifier: &mockBeaconNode{},
|
||||
AttPool: attestations.NewPool(),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
|
||||
AttService: attService,
|
||||
stateGen := stategen.New(beaconDB)
|
||||
// Safe a state in stategen to purposes of testing a service stop / shutdown.
|
||||
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithDepositCache(depositCache),
|
||||
WithChainStartFetcher(web3Service),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
|
||||
WithAttestationService(attService),
|
||||
WithStateGen(stateGen),
|
||||
}
|
||||
|
||||
// Safe a state in stategen to purposes of testing a service stop / shutdown.
|
||||
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
|
||||
|
||||
chainService, err := NewService(ctx, cfg)
|
||||
chainService, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err, "Unable to setup chain service")
|
||||
chainService.genesisTime = time.Unix(1, 0) // non-zero time
|
||||
|
||||
@@ -149,7 +150,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
@@ -176,7 +177,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
@@ -247,7 +248,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
@@ -282,7 +283,8 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
c.cfg.FinalizedStateAtStartUp = headState
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
headBlk, err := c.HeadBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -322,7 +324,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
|
||||
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
s, err := c.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -379,8 +381,8 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
Root: finalizedRoot[:],
|
||||
}))
|
||||
|
||||
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
c.cfg.FinalizedStateAtStartUp = headState
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
s, err := c.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -417,7 +419,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
}
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 1
|
||||
@@ -439,7 +441,7 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
finalizedCheckpt: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}
|
||||
block := util.NewBeaconBlock()
|
||||
@@ -457,7 +459,7 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
|
||||
@@ -488,7 +490,7 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
cfg: &Config{BeaconDB: beaconDB},
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
block := util.NewBeaconBlock()
|
||||
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
|
||||
@@ -505,7 +507,7 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
finalizedCheckpt: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
|
||||
beacon-chain/blockchain/state_balance_cache.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package blockchain

import (
	"context"
	"errors"
	"sync"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

var errNilStateFromStategen = errors.New("justified state can't be nil")

type stateBalanceCache struct {
	sync.Mutex
	balances []uint64
	root     [32]byte
	stateGen stateByRooter
}

type stateByRooter interface {
	StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}

// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
	if sg == nil {
		return nil, errors.New("Can't initialize state balance cache without stategen")
	}
	return &stateBalanceCache{stateGen: sg}, nil
}

// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
	stateBalanceCacheMiss.Inc()
	justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
	if err != nil {
		return nil, err
	}
	if justifiedState == nil || justifiedState.IsNil() {
		return nil, errNilStateFromStategen
	}
	epoch := time.CurrentEpoch(justifiedState)

	justifiedBalances := make([]uint64, justifiedState.NumValidators())
	var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
		if helpers.IsActiveValidatorUsingTrie(val, epoch) {
			justifiedBalances[idx] = val.EffectiveBalance()
		} else {
			justifiedBalances[idx] = 0
		}
		return nil
	}
	if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
		return nil, err
	}

	c.balances = justifiedBalances
	c.root = justifiedRoot
	return c.balances, nil
}

// getBalances takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
	c.Lock()
	defer c.Unlock()
	if justifiedRoot == c.root {
		stateBalanceCacheHit.Inc()
		return c.balances, nil
	}

	return c.update(ctx, justifiedRoot)
}
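
This cache deliberately holds a single entry: the balances computed for the most recently requested justified root. A usage sketch, assuming a *stategen.State is at hand (the stateBalanceCacheHit/Miss counters are Prometheus metrics defined elsewhere in the package):

	cache, err := newStateBalanceCache(stateGen)
	if err != nil {
		return err
	}
	// First call for a root misses: the state is loaded via stategen and the
	// active validators' effective balances are computed and memoized.
	balances, err := cache.get(ctx, justifiedRoot)
	if err != nil {
		return err
	}
	// Repeated calls with the same root return the memoized slice; a new
	// justified root overwrites the entry on its next get().
	_ = balances
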
beacon-chain/blockchain/state_balance_cache_test.go (new file, 225 lines)
@@ -0,0 +1,225 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
type mockStateByRoot struct {
|
||||
state state.BeaconState
|
||||
err error
|
||||
}
|
||||
|
||||
func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
|
||||
return m.state, m.err
|
||||
}
|
||||
|
||||
type testStateOpt func(*ethpb.BeaconStateAltair)
|
||||
|
||||
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
|
||||
return func(a *ethpb.BeaconStateAltair) {
|
||||
a.Validators = v
|
||||
}
|
||||
}
|
||||
|
||||
func testStateWithSlot(slot types.Slot) testStateOpt {
|
||||
return func(a *ethpb.BeaconStateAltair) {
|
||||
a.Slot = slot
|
||||
}
|
||||
}
|
||||
|
||||
func testStateFixture(opts ...testStateOpt) state.BeaconState {
|
||||
a := ðpb.BeaconStateAltair{}
|
||||
for _, o := range opts {
|
||||
o(a)
|
||||
}
|
||||
s, _ := v2.InitializeFromProtoUnsafe(a)
|
||||
return s
|
||||
}
|
||||
|
||||
func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
|
||||
vs := make([]*ethpb.Validator, count)
|
||||
var i uint32 = 0
|
||||
for ; i < uint32(count); i++ {
|
||||
pk := make([]byte, 48)
|
||||
binary.LittleEndian.PutUint32(pk, i)
|
||||
v := ðpb.Validator{PublicKey: pk}
|
||||
for _, o := range opts {
|
||||
o(v)
|
||||
}
|
||||
vs[i] = v
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
|
||||
return func(v *ethpb.Validator) {
|
||||
pki := binary.LittleEndian.Uint64(v.PublicKey)
|
||||
if pki%2 == 0 {
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
} else {
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
|
||||
return func(v *ethpb.Validator) {
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
pki := binary.LittleEndian.Uint64(v.PublicKey)
|
||||
if pki%2 == 0 {
|
||||
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
|
||||
} else {
|
||||
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
|
||||
return func(v *ethpb.Validator) {
|
||||
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
func balanceIsKeyTimes2(v *ethpb.Validator) {
|
||||
pki := binary.LittleEndian.Uint64(v.PublicKey)
|
||||
v.EffectiveBalance = uint64(pki) * 2
|
||||
}
|
||||
|
||||
func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
|
||||
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
|
||||
return generateTestValidators(10,
|
||||
oddValidatorsExpired(types.Slot(99)),
|
||||
balanceIsKeyTimes2), balances
|
||||
}
|
||||
|
||||
func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
|
||||
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
|
||||
return generateTestValidators(10,
|
||||
oddValidatorsQueued(types.Slot(99)),
|
||||
balanceIsKeyTimes2), balances
|
||||
}
|
||||
|
||||
func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
|
||||
balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
|
||||
return generateTestValidators(10,
|
||||
allValidatorsValid(types.Slot(99)),
|
||||
balanceIsKeyTimes2), balances
|
||||
}
|
||||
|
||||
func TestStateBalanceCache(t *testing.T) {
|
||||
type sbcTestCase struct {
|
||||
err error
|
||||
root [32]byte
|
||||
sbc *stateBalanceCache
|
||||
balances []uint64
|
||||
name string
|
||||
}
|
||||
sentinelCacheMiss := errors.New("Cache missed, as expected!")
|
||||
sentinelBalances := []uint64{1, 2, 3, 4, 5}
|
||||
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
|
||||
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
|
||||
allValidValidators, allValidBalances := testAllValidValidators()
|
||||
cases := []sbcTestCase{
|
||||
{
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
balances: sentinelBalances,
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
err: sentinelCacheMiss,
|
||||
},
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
balances: sentinelBalances,
|
||||
},
|
||||
name: "cache hit",
|
||||
},
|
||||
// this works by using a staterooter that returns a known error
|
||||
// so really we're testing the miss by making sure stategen got called
|
||||
// this also tells us stategen errors are propagated
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
//state: generateTestValidators(1, testWithBadEpoch),
|
||||
err: sentinelCacheMiss,
|
||||
},
|
||||
root: bytesutil.ToBytes32([]byte{'B'}),
|
||||
},
|
||||
err: sentinelCacheMiss,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "cache miss",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{},
|
||||
root: bytesutil.ToBytes32([]byte{'B'}),
|
||||
},
|
||||
err: errNilStateFromStategen,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "error for nil state upon cache miss",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(halfExpiredValidators)),
|
||||
},
|
||||
},
|
||||
balances: halfExpiredBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "test filtering by exit epoch",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(halfQueuedValidators)),
|
||||
},
|
||||
},
|
||||
balances: halfQueuedBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "test filtering by activation epoch",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(allValidValidators)),
|
||||
},
|
||||
},
|
||||
balances: allValidBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "happy path",
|
||||
},
|
||||
}
|
||||
ctx := context.Background()
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
cache := c.sbc
|
||||
cacheRootStart := cache.root
|
||||
b, err := cache.get(ctx, c.root)
|
||||
require.ErrorIs(t, err, c.err)
|
||||
require.DeepEqual(t, c.balances, b)
|
||||
if c.err != nil {
|
||||
// if there was an error somewhere, the root should not have changed (unless it already matched)
|
||||
require.Equal(t, cacheRootStart, cache.root)
|
||||
} else {
|
||||
// when successful, the cache should always end with a root matching the request
|
||||
require.Equal(t, c.root, cache.root)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -4,57 +4,94 @@ import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/time/slots"
)

// VerifyWeakSubjectivityRoot verifies the weak subjectivity root in the service struct.
var errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
var errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")

type weakSubjectivityDB interface {
	HasBlock(ctx context.Context, blockRoot [32]byte) bool
	BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
}

type WeakSubjectivityVerifier struct {
	enabled bool
	verified bool
	root [32]byte
	epoch types.Epoch
	slot types.Slot
	db weakSubjectivityDB
}

// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
	// TODO(7342): Weak subjectivity checks are currently optional. When we require the flag to be specified
	// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
	// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
	if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
		return &WeakSubjectivityVerifier{
			enabled: false,
		}, nil
	}
	startSlot, err := slots.EpochStart(wsc.Epoch)
	if err != nil {
		return nil, err
	}
	return &WeakSubjectivityVerifier{
		enabled: true,
		verified: false,
		root: bytesutil.ToBytes32(wsc.Root),
		epoch: wsc.Epoch,
		db: db,
		slot: startSlot,
	}, nil
}

// VerifyWeakSubjectivity verifies the weak subjectivity root in the service struct.
// Reference design: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
	// TODO(7342): Remove the following to fully use weak subjectivity in production.
	if s.cfg.WeakSubjectivityCheckpt == nil || len(s.cfg.WeakSubjectivityCheckpt.Root) == 0 || s.cfg.WeakSubjectivityCheckpt.Epoch == 0 {
func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, finalizedEpoch types.Epoch) error {
	if v.verified || !v.enabled {
		return nil
	}

	// Do nothing if the weak subjectivity has previously been verified,
	// or weak subjectivity epoch is higher than last finalized epoch.
	if s.wsVerified {
		return nil
	}
	if s.cfg.WeakSubjectivityCheckpt.Epoch > s.finalizedCheckpt.Epoch {
	// Two conditions are described in the specs:
	// IF epoch_number > store.finalized_checkpoint.epoch,
	// then ASSERT during block sync that block with root block_root
	// is in the sync path at epoch epoch_number. Emit descriptive critical error if this assert fails,
	// then exit client process.
	// we do not handle this case ^, because we can only blocks that have been processed / are currently
	// in line for finalization, we don't have the ability to look ahead. so we only satisfy the following:
	// IF epoch_number <= store.finalized_checkpoint.epoch,
	// then ASSERT that the block in the canonical chain at epoch epoch_number has root block_root.
	// Emit descriptive critical error if this assert fails, then exit client process.
	if v.epoch > finalizedEpoch {
		return nil
	}
	log.Infof("Performing weak subjectivity check for root %#x in epoch %d", v.root, v.epoch)

	r := bytesutil.ToBytes32(s.cfg.WeakSubjectivityCheckpt.Root)
	log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
	// Save initial sync cached blocks to DB.
	if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
		return err
	}
	// A node should have the weak subjectivity block in the DB.
	if !s.cfg.BeaconDB.HasBlock(ctx, r) {
		return fmt.Errorf("node does not have root in DB: %#x", r)
	}

	startSlot, err := slots.EpochStart(s.cfg.WeakSubjectivityCheckpt.Epoch)
	if err != nil {
		return err
	if !v.db.HasBlock(ctx, v.root) {
		return errors.Wrap(errWSBlockNotFound, fmt.Sprintf("missing root %#x", v.root))
	}
	filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
	// A node should have the weak subjectivity block corresponds to the correct epoch in the DB.
	filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(startSlot + params.BeaconConfig().SlotsPerEpoch)
	roots, err := s.cfg.BeaconDB.BlockRoots(ctx, filter)
	roots, err := v.db.BlockRoots(ctx, filter)
	if err != nil {
		return err
		return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
	}
	for _, root := range roots {
		if r == root {
		if v.root == root {
			log.Info("Weak subjectivity check has passed")
			s.wsVerified = true
			v.verified = true
			return nil
		}
	}

	return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
	return errors.Wrap(errWSBlockNotFoundInEpoch, fmt.Sprintf("root=%#x, epoch=%d", v.root, v.epoch))
}

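Putting the pieces together, the verifier is built once from the configured checkpoint and then re-invoked as finality advances, and callers can distinguish the failure modes with errors.Is, as the updated test below does. A usage sketch:

	wv, err := NewWeakSubjectivityVerifier(wsCheckpoint, beaconDB)
	if err != nil {
		return err
	}
	if err := wv.VerifyWeakSubjectivity(ctx, finalizedEpoch); err != nil {
		if errors.Is(err, errWSBlockNotFound) || errors.Is(err, errWSBlockNotFoundInEpoch) {
			// The configured checkpoint contradicts the locally stored chain history.
		}
		return err
	}
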
@@ -4,6 +4,7 @@ import (
	"context"
	"testing"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -23,59 +24,57 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
	require.NoError(t, err)
	tests := []struct {
		wsVerified bool
		wantErr bool
		wantErr error
		checkpt *ethpb.Checkpoint
		finalizedEpoch types.Epoch
		errString string
		name string
	}{
		{
			name: "nil root and epoch",
			wantErr: false,
			name: "nil root and epoch",
		},
		{
			name: "already verified",
			checkpt: &ethpb.Checkpoint{Epoch: 2},
			finalizedEpoch: 2,
			wsVerified: true,
			wantErr: false,
		},
		{
			name: "not yet to verify, ws epoch higher than finalized epoch",
			checkpt: &ethpb.Checkpoint{Epoch: 2},
			finalizedEpoch: 1,
			wantErr: false,
		},
		{
			name: "can't find the block in DB",
			checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: 1},
			finalizedEpoch: 3,
			wantErr: true,
			errString: "node does not have root in DB",
			wantErr: errWSBlockNotFound,
		},
		{
			name: "can't find the block corresponds to ws epoch in DB",
			checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
			finalizedEpoch: 3,
			wantErr: true,
			errString: "node does not have root in db corresponding to epoch",
			wantErr: errWSBlockNotFoundInEpoch,
		},
		{
			name: "can verify and pass",
			checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
			finalizedEpoch: 3,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
			require.NoError(t, err)
			s := &Service{
				cfg: &Config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
				wsVerified: tt.wsVerified,
				cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
				finalizedCheckpt: &ethpb.Checkpoint{Epoch: tt.finalizedEpoch},
				wsVerifier: wv,
			}
			if err := s.VerifyWeakSubjectivityRoot(context.Background()); (err != nil) != tt.wantErr {
				require.ErrorContains(t, tt.errString, err)
			err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.finalizedCheckpt.Epoch)
			if tt.wantErr == nil {
				require.NoError(t, err)
			} else {
				require.Equal(t, true, errors.Is(err, tt.wantErr))
			}
		})
	}

@@ -5,7 +5,6 @@
package depositcache

import (
	"bytes"
	"context"
	"encoding/hex"
	"math/big"
@@ -54,6 +53,7 @@ type DepositCache struct {
	pendingDeposits []*dbpb.DepositContainer
	deposits []*dbpb.DepositContainer
	finalizedDeposits *FinalizedDeposits
	depositsByKey map[[48]byte][]*dbpb.DepositContainer
	depositsLock sync.RWMutex
}

@@ -69,6 +69,7 @@ func New() (*DepositCache, error) {
	return &DepositCache{
		pendingDeposits: []*dbpb.DepositContainer{},
		deposits: []*dbpb.DepositContainer{},
		depositsByKey: map[[48]byte][]*ethpb.DepositContainer{},
		finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
	}, nil
}
@@ -95,10 +96,15 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
	}
	// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
	depCtr := &dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
	newDeposits := append(
		[]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}},
		[]*dbpb.DepositContainer{depCtr},
		dc.deposits[heightIdx:]...)
	dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
	// Append the deposit to our map, in the event no deposits
	// exist for the pubkey , it is simply added to the map.
	pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
	dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
	historicalDepositsCount.Inc()
	return nil
}
@@ -112,6 +118,13 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbp

	sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
	dc.deposits = ctrs
	for _, c := range ctrs {
		// Use a new value, as the reference
		// of c changes in the next iteration.
		newPtr := c
		pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
		dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
	}
	historicalDepositsCount.Add(float64(len(ctrs)))
}

@@ -202,13 +215,15 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et

	var deposit *ethpb.Deposit
	var blockNum *big.Int
	for _, ctnr := range dc.deposits {
		if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
			deposit = ctnr.Deposit
			blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
			break
		}
	deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
	if !ok || len(deps) == 0 {
		return deposit, blockNum
	}
	// We always return the first deposit if a particular
	// validator key has multiple deposits assigned to
	// it.
	deposit = deps[0].Deposit
	blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
	return deposit, blockNum
}

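Lookups by public key now go through the depositsByKey map rather than a linear scan of the sorted slice, and both insert paths keep that map in step. A small usage sketch (PadTo pads the key to the 48-byte BLS public key length):

	pk := bytesutil.PadTo([]byte("pk0"), 48)
	d := &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: pk, Amount: 32}}
	if err := dc.InsertDeposit(ctx, d, 100 /* eth1 block height */, 0 /* index */, [32]byte{}); err != nil {
		return err
	}
	dep, blkNum := dc.DepositByPubkey(ctx, pk)
	// dep is the first deposit recorded for pk; blkNum is big.NewInt(100).
	_, _ = dep, blkNum
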
@@ -44,31 +44,31 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
deposit: ðpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'A'}}},
|
||||
index: 0,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
deposit: ðpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'B'}}},
|
||||
index: 3,
|
||||
expectedErr: "wanted deposit with index 1 to be inserted but received 3",
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
deposit: ðpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'C'}}},
|
||||
index: 1,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
deposit: ðpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'D'}}},
|
||||
index: 4,
|
||||
expectedErr: "wanted deposit with index 2 to be inserted but received 4",
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
deposit: ðpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'E'}}},
|
||||
index: 2,
|
||||
expectedErr: "",
|
||||
},
|
||||
@@ -316,8 +316,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
ctrs := []*dbpb.DepositContainer{
|
||||
{
|
||||
Eth1BlockHeight: 9,
|
||||
Deposit: ðpb.Deposit{
|
||||
@@ -359,6 +358,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
dc.InsertDepositContainers(context.Background(), ctrs)
|
||||
|
||||
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
|
||||
dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
|
||||
@@ -626,24 +626,28 @@ func TestPruneProofs_Ok(t *testing.T) {
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -669,24 +673,26 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil},
|
||||
index: 0,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -709,24 +715,28 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -752,24 +762,28 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -785,6 +799,36 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestDepositMap_WorksCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)

pk0 := bytesutil.PadTo([]byte("pk0"), 48)
dep, _ := dc.DepositByPubkey(context.Background(), pk0)
var nilDep *ethpb.Deposit
assert.DeepEqual(t, nilDep, dep)

dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))

dep, _ = dc.DepositByPubkey(context.Background(), pk0)
assert.NotEqual(t, nilDep, dep)
assert.Equal(t, uint64(1000), dep.Data.Amount)

dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))

// Make sure we have the same deposit returned over here.
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
assert.NotEqual(t, nilDep, dep)
assert.Equal(t, uint64(1000), dep.Data.Amount)

// Make sure another key doesn't work.
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
dep, _ = dc.DepositByPubkey(context.Background(), pk1)
assert.DeepEqual(t, nilDep, dep)
}

func makeDepositProof() [][]byte {
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
for i := range proof {

12
beacon-chain/cache/skip_slot_cache.go
vendored
@@ -120,26 +120,22 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {

// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return nil
return
}

c.lock.Lock()
defer c.lock.Unlock()

delete(c.inProgress, r)
return nil
}

// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) {
if c.disabled {
return nil
return
}

// Copy state so cached value is not mutated.
c.cache.Add(r, state.Copy())

return nil
}
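With Put and MarkNotInProgress no longer returning an error, callers simply invoke them and move on; only Get and MarkInProgress still report failures. A minimal caller sketch under that assumption (the function name and variables are placeholders, not code from this change; it assumes a file that already imports the cache and state packages):

// Mark a root as in progress, cache the computed state, then release the lock.
// Put copies the state internally, so the caller may keep mutating its own copy.
func cacheState(ctx context.Context, c *cache.SkipSlotCache, r [32]byte, st state.BeaconState) error {
	if err := c.MarkInProgress(r); err != nil {
		return err
	}
	defer c.MarkNotInProgress(r) // void now; nothing left to check
	c.Put(ctx, r, st)            // void now; internally stores state.Copy()
	_, err := c.Get(ctx, r)
	return err
}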
4
beacon-chain/cache/skip_slot_cache_test.go
vendored
@@ -28,8 +28,8 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
})
require.NoError(t, err)

require.NoError(t, c.Put(ctx, r, s))
require.NoError(t, c.MarkNotInProgress(r))
c.Put(ctx, r, s)
c.MarkNotInProgress(r)

res, err := c.Get(ctx, r)
require.NoError(t, err)

@@ -25,7 +25,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
beaconState state.BeaconState,
|
||||
b block.SignedBeaconBlock,
|
||||
) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := b.Block().Body()
|
||||
@@ -142,13 +142,19 @@ func SetParticipationAndRewardProposer(
}

// HasValidatorFlag returns true if the flag at position has set.
func HasValidatorFlag(flag, flagPosition uint8) bool {
return ((flag >> flagPosition) & 1) == 1
func HasValidatorFlag(flag, flagPosition uint8) (bool, error) {
if flagPosition > 7 {
return false, errors.New("flag position exceeds length")
}
return ((flag >> flagPosition) & 1) == 1, nil
}

// AddValidatorFlag adds new validator flag to existing one.
func AddValidatorFlag(flag, flagPosition uint8) uint8 {
return flag | (1 << flagPosition)
func AddValidatorFlag(flag, flagPosition uint8) (uint8, error) {
if flagPosition > 7 {
return flag, errors.New("flag position exceeds length")
}
return flag | (1 << flagPosition), nil
}
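Both helpers now validate the flag position (a participation byte only has bits 0 through 7) and return an error instead of silently mis-shifting. A short sketch of the updated call pattern, mirroring how the call sites below are rewritten (illustrative fragment, assumed to run in code that already imports the altair and params packages):

// Set and then read back the timely-source flag on an empty participation byte.
participation, err := altair.AddValidatorFlag(0, params.BeaconConfig().TimelySourceFlagIndex)
if err != nil {
	return err
}
has, err := altair.HasValidatorFlag(participation, params.BeaconConfig().TimelySourceFlagIndex)
if err != nil {
	return err
}
_ = has // true; any position above 7 now returns "flag position exceeds length"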
// EpochParticipation sets and returns the proposer reward numerator and epoch participation.
|
||||
@@ -174,16 +180,37 @@ func EpochParticipation(beaconState state.BeaconState, indices []uint64, epochPa
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if participatedFlags[sourceFlagIndex] && !HasValidatorFlag(epochParticipation[index], sourceFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
has, err := HasValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if participatedFlags[sourceFlagIndex] && !has {
|
||||
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
proposerRewardNumerator += br * cfg.TimelySourceWeight
|
||||
}
|
||||
if participatedFlags[targetFlagIndex] && !HasValidatorFlag(epochParticipation[index], targetFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
has, err = HasValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if participatedFlags[targetFlagIndex] && !has {
|
||||
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
proposerRewardNumerator += br * cfg.TimelyTargetWeight
|
||||
}
|
||||
if participatedFlags[headFlagIndex] && !HasValidatorFlag(epochParticipation[index], headFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
has, err = HasValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if participatedFlags[headFlagIndex] && !has {
|
||||
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
proposerRewardNumerator += br * cfg.TimelyHeadWeight
|
||||
}
|
||||
}
|
||||
|
||||
@@ -275,9 +275,15 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyTargetFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelySourceFlagIndex))
|
||||
has, err := altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
has, err = altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelySourceFlagIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
has, err = altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyTargetFlagIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -331,12 +337,19 @@ func TestValidatorFlag_Has(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
for _, f := range tt.expected {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(tt.set, f))
|
||||
has, err := altair.HasValidatorFlag(tt.set, f)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatorFlag_Has_ExceedsLength(t *testing.T) {
|
||||
_, err := altair.HasValidatorFlag(0, 8)
|
||||
require.ErrorContains(t, "flag position exceeds length", err)
|
||||
}
|
||||
|
||||
func TestValidatorFlag_Add(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -368,23 +381,33 @@ func TestValidatorFlag_Add(t *testing.T) {
|
||||
expectedFalse: []uint8{},
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b := uint8(0)
|
||||
for _, f := range tt.set {
|
||||
b = altair.AddValidatorFlag(b, f)
|
||||
b, err = altair.AddValidatorFlag(b, f)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
for _, f := range tt.expectedFalse {
|
||||
require.Equal(t, false, altair.HasValidatorFlag(b, f))
|
||||
has, err := altair.HasValidatorFlag(b, f)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, has)
|
||||
}
|
||||
for _, f := range tt.expectedTrue {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(b, f))
|
||||
has, err := altair.HasValidatorFlag(b, f)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatorFlag_Add_ExceedsLength(t *testing.T) {
|
||||
_, err := altair.AddValidatorFlag(0, 8)
|
||||
require.ErrorContains(t, "flag position exceeds length", err)
|
||||
}
|
||||
|
||||
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := &ethpb.BeaconStateAltair{}
|
||||
|
||||
@@ -42,12 +42,18 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
// Set validator's active status for current epoch.
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
v.IsActiveCurrentEpoch = true
bal.ActiveCurrentEpoch += val.EffectiveBalance()
bal.ActiveCurrentEpoch, err = math.Add64(bal.ActiveCurrentEpoch, val.EffectiveBalance())
if err != nil {
return err
}
}
// Set validator's active status for preivous epoch.
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
v.IsActivePrevEpoch = true
bal.ActivePrevEpoch += val.EffectiveBalance()
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())
if err != nil {
return err
}
}
vals[idx] = v
return nil
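Switching from += to math.Add64 makes the active-balance accumulation fail loudly on overflow rather than wrapping around. A contrived illustration of the guarded failure mode (the operands are made up; math here is Prysm's math helper package, not the standard library):

// Two large effective balances cannot be summed without overflowing uint64.
total, err := math.Add64(1<<63, 1<<63) // sums to 2^64, one past MaxUint64
if err != nil {
	// Surfaced by InitializePrecomputeValidators as
	// "could not read every validator: addition overflows".
	return err
}
_ = total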
@@ -150,7 +156,19 @@ func ProcessEpochParticipation(
|
||||
sourceIdx := cfg.TimelySourceFlagIndex
|
||||
headIdx := cfg.TimelyHeadFlagIndex
|
||||
for i, b := range cp {
|
||||
if HasValidatorFlag(b, targetIdx) && vals[i].IsActiveCurrentEpoch {
|
||||
has, err := HasValidatorFlag(b, sourceIdx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if has && vals[i].IsActiveCurrentEpoch {
|
||||
vals[i].IsCurrentEpochAttester = true
|
||||
}
|
||||
has, err = HasValidatorFlag(b, targetIdx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if has && vals[i].IsActiveCurrentEpoch {
|
||||
vals[i].IsCurrentEpochAttester = true
|
||||
vals[i].IsCurrentEpochTargetAttester = true
|
||||
}
|
||||
}
|
||||
@@ -159,17 +177,31 @@ func ProcessEpochParticipation(
|
||||
return nil, nil, err
|
||||
}
|
||||
for i, b := range pp {
|
||||
if HasValidatorFlag(b, sourceIdx) && vals[i].IsActivePrevEpoch {
|
||||
vals[i].IsPrevEpochAttester = true
|
||||
has, err := HasValidatorFlag(b, sourceIdx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if HasValidatorFlag(b, targetIdx) && vals[i].IsActivePrevEpoch {
|
||||
if has && vals[i].IsActivePrevEpoch {
|
||||
vals[i].IsPrevEpochAttester = true
|
||||
vals[i].IsPrevEpochSourceAttester = true
|
||||
}
|
||||
has, err = HasValidatorFlag(b, targetIdx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if has && vals[i].IsActivePrevEpoch {
|
||||
vals[i].IsPrevEpochAttester = true
|
||||
vals[i].IsPrevEpochTargetAttester = true
|
||||
}
|
||||
if HasValidatorFlag(b, headIdx) && vals[i].IsActivePrevEpoch {
|
||||
has, err = HasValidatorFlag(b, headIdx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if has && vals[i].IsActivePrevEpoch {
|
||||
vals[i].IsPrevEpochHeadAttester = true
|
||||
}
|
||||
}
|
||||
bal = precompute.UpdateBalance(vals, bal)
|
||||
bal = precompute.UpdateBalance(vals, bal, beaconState.Version())
|
||||
return vals, bal, nil
|
||||
}
|
||||
|
||||
@@ -268,7 +300,7 @@ func attestationDelta(
|
||||
headWeight := cfg.TimelyHeadWeight
|
||||
reward, penalty = uint64(0), uint64(0)
|
||||
// Process source reward / penalty
|
||||
if val.IsPrevEpochAttester && !val.IsSlashed {
|
||||
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
|
||||
if !inactivityLeak {
|
||||
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
|
||||
reward += n / (activeIncrement * weightDenominator)
|
||||
|
||||
@@ -2,6 +2,7 @@ package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
@@ -62,6 +63,21 @@ func TestInitializeEpochValidators_Ok(t *testing.T) {
|
||||
assert.DeepEqual(t, wantedBalances, b, "Incorrect wanted balance")
|
||||
}
|
||||
|
||||
func TestInitializeEpochValidators_Overflow(t *testing.T) {
|
||||
ffe := params.BeaconConfig().FarFutureEpoch
|
||||
s, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
Validators: []*ethpb.Validator{
|
||||
{WithdrawableEpoch: ffe, ExitEpoch: ffe, EffectiveBalance: math.MaxUint64},
|
||||
{WithdrawableEpoch: ffe, ExitEpoch: ffe, EffectiveBalance: math.MaxUint64},
|
||||
},
|
||||
InactivityScores: []uint64{0, 1},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, _, err = InitializePrecomputeValidators(context.Background(), s)
|
||||
require.ErrorContains(t, "could not read every validator: addition overflows", err)
|
||||
}
|
||||
|
||||
func TestInitializeEpochValidators_BadState(t *testing.T) {
|
||||
s, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
|
||||
Validators: []*ethpb.Validator{{}},
|
||||
@@ -90,14 +106,18 @@ func TestProcessEpochParticipation(t *testing.T) {
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsCurrentEpochAttester: true,
|
||||
IsPrevEpochAttester: true,
|
||||
IsPrevEpochSourceAttester: true,
|
||||
}, validators[1])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsCurrentEpochAttester: true,
|
||||
IsPrevEpochAttester: true,
|
||||
IsPrevEpochSourceAttester: true,
|
||||
IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
}, validators[2])
|
||||
@@ -106,7 +126,9 @@ func TestProcessEpochParticipation(t *testing.T) {
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsCurrentEpochAttester: true,
|
||||
IsPrevEpochAttester: true,
|
||||
IsPrevEpochSourceAttester: true,
|
||||
IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
IsPrevEpochHeadAttester: true,
|
||||
@@ -120,8 +142,10 @@ func TestProcessEpochParticipation(t *testing.T) {
|
||||
func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
|
||||
generateParticipation := func(flags ...uint8) byte {
|
||||
b := byte(0)
|
||||
var err error
|
||||
for _, flag := range flags {
|
||||
b = AddValidatorFlag(b, flag)
|
||||
b, err = AddValidatorFlag(b, flag)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
@@ -159,6 +183,7 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
|
||||
IsActiveCurrentEpoch: false,
|
||||
IsActivePrevEpoch: true,
|
||||
IsPrevEpochAttester: true,
|
||||
IsPrevEpochSourceAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -168,7 +193,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsCurrentEpochAttester: true,
|
||||
IsPrevEpochAttester: true,
|
||||
IsPrevEpochSourceAttester: true,
|
||||
IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
IsPrevEpochHeadAttester: true,
|
||||
@@ -395,8 +422,12 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) {
|
||||
func testState() (state.BeaconState, error) {
|
||||
generateParticipation := func(flags ...uint8) byte {
|
||||
b := byte(0)
|
||||
var err error
|
||||
for _, flag := range flags {
|
||||
b = AddValidatorFlag(b, flag)
|
||||
b, err = AddValidatorFlag(b, flag)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -161,14 +161,35 @@ func TranslateParticipation(ctx context.Context, state *statealtair.BeaconState,
|
||||
targetFlagIndex := cfg.TimelyTargetFlagIndex
|
||||
headFlagIndex := cfg.TimelyHeadFlagIndex
|
||||
for _, index := range indices {
|
||||
if participatedFlags[sourceFlagIndex] && !HasValidatorFlag(epochParticipation[index], sourceFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
has, err := HasValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if participatedFlags[targetFlagIndex] && !HasValidatorFlag(epochParticipation[index], targetFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
if participatedFlags[sourceFlagIndex] && !has {
|
||||
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if participatedFlags[headFlagIndex] && !HasValidatorFlag(epochParticipation[index], headFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
has, err = HasValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if participatedFlags[targetFlagIndex] && !has {
|
||||
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
has, err = HasValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if participatedFlags[headFlagIndex] && !has {
|
||||
epochParticipation[index], err = AddValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -62,9 +61,15 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyHeadFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyTargetFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex))
|
||||
has, err := altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
has, err = altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyTargetFlagIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
has, err = altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyHeadFlagIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,8 +78,6 @@ func TestUpgradeToAltair(t *testing.T) {
|
||||
preForkState := st.Copy()
|
||||
aState, err := altair.UpgradeToAltair(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
_, ok := aState.(state.BeaconStateAltair)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), aState.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), aState.GenesisValidatorRoot())
|
||||
|
||||
@@ -25,7 +25,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
beaconState state.BeaconState,
|
||||
b block.SignedBeaconBlock,
|
||||
) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := b.Block().Body()
|
||||
|
||||
@@ -62,7 +62,7 @@ func ProcessAttesterSlashing(
|
||||
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify attester slashing")
|
||||
}
|
||||
slashableIndices := slashableAttesterIndices(slashing)
|
||||
slashableIndices := SlashableAttesterIndices(slashing)
|
||||
sort.SliceStable(slashableIndices, func(i, j int) bool {
|
||||
return slashableIndices[i] < slashableIndices[j]
|
||||
})
|
||||
@@ -152,7 +152,8 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
return isDoubleVote || isSurroundVote
}

func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
return nil
}
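Exporting this helper lets callers outside the package recover the validators implicated by a slashing; ProcessAttesterSlashing above still sorts the result before applying penalties. A hedged usage sketch (the package alias and the printing are illustrative only):

// Collect the intersection of attesting indices from both attestations
// and walk them in ascending order, as ProcessAttesterSlashing does.
indices := blocks.SlashableAttesterIndices(slashing)
sort.SliceStable(indices, func(i, j int) bool { return indices[i] < indices[j] })
for _, idx := range indices {
	fmt.Printf("validator %d is implicated by the slashing\n", idx)
}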
@@ -245,7 +245,7 @@ func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
slashableAttesterIndices(attesterSlashing)
|
||||
SlashableAttesterIndices(attesterSlashing)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ func ProcessBlockHeader(
|
||||
beaconState state.BeaconState,
|
||||
block block.SignedBeaconBlock,
|
||||
) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(block); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(block); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bodyRoot, err := block.Block().Body().HashTreeRoot()
|
||||
|
||||
@@ -31,7 +31,7 @@ func ProcessRandao(
|
||||
beaconState state.BeaconState,
|
||||
b block.SignedBeaconBlock,
|
||||
) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := b.Block().Body()
|
||||
|
||||
@@ -24,6 +24,7 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
@@ -51,6 +52,7 @@ go_test(
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -64,7 +65,7 @@ func ProcessAttestations(
|
||||
vp = UpdateValidator(vp, v, indices, a, a.Data.Slot)
|
||||
}
|
||||
|
||||
pBal = UpdateBalance(vp, pBal)
|
||||
pBal = UpdateBalance(vp, pBal, state.Version())
|
||||
|
||||
return vp, pBal, nil
|
||||
}
|
||||
@@ -170,7 +171,7 @@ func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *et
}

// UpdateBalance updates pre computed balance store.
func UpdateBalance(vp []*Validator, bBal *Balance) *Balance {
func UpdateBalance(vp []*Validator, bBal *Balance, stateVersion int) *Balance {
for _, v := range vp {
if !v.IsSlashed {
if v.IsCurrentEpochAttester {
@@ -179,7 +180,10 @@ func UpdateBalance(vp []*Validator, bBal *Balance) *Balance {
if v.IsCurrentEpochTargetAttester {
bBal.CurrentEpochTargetAttested += v.CurrentEpochEffectiveBalance
}
if v.IsPrevEpochAttester {
if stateVersion == version.Phase0 && v.IsPrevEpochAttester {
bBal.PrevEpochAttested += v.CurrentEpochEffectiveBalance
}
if stateVersion == version.Altair && v.IsPrevEpochSourceAttester {
bBal.PrevEpochAttested += v.CurrentEpochEffectiveBalance
}
if v.IsPrevEpochTargetAttester {
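The extra stateVersion argument keeps the previous-epoch source accounting fork-aware: Phase0 still reads IsPrevEpochAttester, while Altair feeds the same PrevEpochAttested bucket from IsPrevEpochSourceAttester. The two call shapes, sketched with placeholder values:

// Phase0 state: previous-epoch credit comes from IsPrevEpochAttester.
pBal := precompute.UpdateBalance(vp, &precompute.Balance{}, version.Phase0)
// Altair state: the same bucket is fed by IsPrevEpochSourceAttester instead.
pBal = precompute.UpdateBalance(vp, &precompute.Balance{}, version.Altair)
_ = pBal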
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
@@ -65,7 +66,7 @@ func TestUpdateBalance(t *testing.T) {
|
||||
PrevEpochTargetAttested: 100 * params.BeaconConfig().EffectiveBalanceIncrement,
|
||||
PrevEpochHeadAttested: 200 * params.BeaconConfig().EffectiveBalanceIncrement,
|
||||
}
|
||||
pBal := precompute.UpdateBalance(vp, &precompute.Balance{})
|
||||
pBal := precompute.UpdateBalance(vp, &precompute.Balance{}, version.Phase0)
|
||||
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
|
||||
}
|
||||
|
||||
|
||||
@@ -20,12 +20,14 @@ type Validator struct {
|
||||
IsCurrentEpochTargetAttester bool
|
||||
// IsPrevEpochAttester is true if the validator attested previous epoch.
|
||||
IsPrevEpochAttester bool
|
||||
// IsPrevEpochSourceAttester is true if the validator attested to source previous epoch. [Only for Altair]
|
||||
IsPrevEpochSourceAttester bool
|
||||
// IsPrevEpochTargetAttester is true if the validator attested previous epoch target.
|
||||
IsPrevEpochTargetAttester bool
|
||||
// IsHeadAttester is true if the validator attested head.
|
||||
IsPrevEpochHeadAttester bool
|
||||
|
||||
// CurrentEpochEffectiveBalance is how much effective balance this validator validator has current epoch.
|
||||
// CurrentEpochEffectiveBalance is how much effective balance this validator has current epoch.
|
||||
CurrentEpochEffectiveBalance uint64
|
||||
// InclusionSlot is the slot of when the attestation gets included in the chain.
|
||||
InclusionSlot types.Slot
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
// Package operation contains types for block operation-specific events fired
|
||||
// during the runtime of a beacon node such as attestations, voluntary
|
||||
// exits, and slashings.
|
||||
// Package operation contains types for block operation-specific events fired during the runtime of a beacon node.
|
||||
package operation
|
||||
|
||||
import (
|
||||
@@ -18,6 +16,9 @@ const (
|
||||
|
||||
// ExitReceived is sent after an voluntary exit object has been received from the outside world (eg in RPC or sync)
|
||||
ExitReceived
|
||||
|
||||
// SyncCommitteeContributionReceived is sent after a sync committee contribution object has been received.
|
||||
SyncCommitteeContributionReceived
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -37,3 +38,9 @@ type ExitReceivedData struct {
|
||||
// Exit is the voluntary exit object.
|
||||
Exit *ethpb.SignedVoluntaryExit
|
||||
}
|
||||
|
||||
// SyncCommitteeContributionReceivedData is the data sent with SyncCommitteeContributionReceived objects.
|
||||
type SyncCommitteeContributionReceivedData struct {
|
||||
// Contribution is the sync committee contribution object.
|
||||
Contribution *ethpb.SignedContributionAndProof
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/beacon-chain:__subpackages__",
|
||||
"//contracts/deposit:__pkg__",
|
||||
"//crypto/keystore:__pkg__",
|
||||
"//fuzz:__pkg__",
|
||||
|
||||
@@ -199,7 +199,7 @@ func CommitteeAssignments(
|
||||
// Each slot in an epoch has a different set of committees. This value is derived from the
|
||||
// active validator set, which does not change.
|
||||
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
|
||||
validatorIndexToCommittee := make(map[types.ValidatorIndex]*CommitteeAssignmentContainer, params.BeaconConfig().SlotsPerEpoch.Mul(numCommitteesPerSlot))
|
||||
validatorIndexToCommittee := make(map[types.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))
|
||||
|
||||
// Compute all committees for all slots.
|
||||
for i := types.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
// VerifyNilBeaconBlock checks if any composite field of input signed beacon block is nil.
|
||||
// BeaconBlockIsNil checks if any composite field of input signed beacon block is nil.
|
||||
// Access to these nil fields will result in run time panic,
|
||||
// it is recommended to run these checks as first line of defense.
|
||||
func VerifyNilBeaconBlock(b block.SignedBeaconBlock) error {
|
||||
func BeaconBlockIsNil(b block.SignedBeaconBlock) error {
|
||||
if b == nil || b.IsNil() {
|
||||
return errors.New("signed beacon block can't be nil")
|
||||
}
|
||||
|
||||
@@ -2,9 +2,9 @@ package signing
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/eth2-types"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// Domain returns the domain version for BLS private key to sign and verify.
|
||||
|
||||
@@ -3,9 +3,9 @@ package signing
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/eth2-types"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
@@ -60,6 +60,16 @@ func CanUpgradeToAltair(slot types.Slot) bool {
return epochStart && altairEpoch
}

// CanUpgradeToMerge returns true if the input `slot` can upgrade to Merge fork.
//
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH
func CanUpgradeToMerge(slot types.Slot) bool {
epochStart := slots.IsEpochStart(slot)
mergeEpoch := slots.ToEpoch(slot) == params.BeaconConfig().MergeForkEpoch
return epochStart && mergeEpoch
}

// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//
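Like CanUpgradeToAltair, the new check only passes on the first slot of the configured fork epoch. A small illustration using the configuration of the test below (MergeForkEpoch overridden to 5); the printing is just for demonstration:

mergeSlot := types.Slot(params.BeaconConfig().MergeForkEpoch) * params.BeaconConfig().SlotsPerEpoch
fmt.Println(CanUpgradeToMerge(1))         // false: slot 1 is not an epoch start
fmt.Println(CanUpgradeToMerge(mergeSlot)) // true: first slot of the merge fork epoch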
@@ -114,6 +114,40 @@ func TestCanUpgradeToAltair(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanUpgradeToMerge(t *testing.T) {
|
||||
bc := params.BeaconConfig()
|
||||
bc.MergeForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bc)
|
||||
tests := []struct {
|
||||
name string
|
||||
slot types.Slot
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "not epoch start",
|
||||
slot: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "not merge epoch",
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "merge epoch",
|
||||
slot: types.Slot(params.BeaconConfig().MergeForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := CanUpgradeToMerge(tt.slot); got != tt.want {
|
||||
t.Errorf("CanUpgradeToMerge() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
|
||||
tests := []struct {
|
||||
slot types.Slot
|
||||
|
||||
@@ -213,9 +213,12 @@ func TestProcessEpoch_BadBalanceAltair(t *testing.T) {
|
||||
assert.NoError(t, s.SetSlot(63))
|
||||
assert.NoError(t, s.UpdateBalancesAtIndex(0, math.MaxUint64))
|
||||
participation := byte(0)
|
||||
participation = altair.AddValidatorFlag(participation, params.BeaconConfig().TimelyHeadFlagIndex)
|
||||
participation = altair.AddValidatorFlag(participation, params.BeaconConfig().TimelySourceFlagIndex)
|
||||
participation = altair.AddValidatorFlag(participation, params.BeaconConfig().TimelyTargetFlagIndex)
|
||||
participation, err := altair.AddValidatorFlag(participation, params.BeaconConfig().TimelyHeadFlagIndex)
|
||||
require.NoError(t, err)
|
||||
participation, err = altair.AddValidatorFlag(participation, params.BeaconConfig().TimelySourceFlagIndex)
|
||||
require.NoError(t, err)
|
||||
participation, err = altair.AddValidatorFlag(participation, params.BeaconConfig().TimelyTargetFlagIndex)
|
||||
require.NoError(t, err)
|
||||
|
||||
epochParticipation, err := s.CurrentEpochParticipation()
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -51,8 +51,8 @@ func ExecuteStateTransition(
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
return nil, errors.New("nil block")
|
||||
if err := helpers.BeaconBlockIsNil(signed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ExecuteStateTransition")
|
||||
@@ -214,10 +214,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := SkipSlotCache.MarkNotInProgress(key); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Failed to mark skip slot no longer in progress")
|
||||
}
|
||||
SkipSlotCache.MarkNotInProgress(key)
|
||||
}()
|
||||
|
||||
for state.Slot() < slot {
|
||||
@@ -225,7 +222,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
tracing.AnnotateError(span, ctx.Err())
|
||||
// Cache last best value.
|
||||
if highestSlot < state.Slot() {
|
||||
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
|
||||
if SkipSlotCache.Put(ctx, key, state); err != nil {
|
||||
log.WithError(err).Error("Failed to put skip slot cache value")
|
||||
}
|
||||
}
|
||||
@@ -269,10 +266,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
}
|
||||
|
||||
if highestSlot < state.Slot() {
|
||||
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
|
||||
log.WithError(err).Error("Failed to put skip slot cache value")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
SkipSlotCache.Put(ctx, key, state)
|
||||
}
|
||||
|
||||
return state, nil
|
||||
@@ -280,7 +274,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
|
||||
// VerifyOperationLengths verifies that block operation lengths are valid.
|
||||
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.SignedBeaconBlock) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := b.Block().Body()
|
||||
|
||||
@@ -185,7 +185,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
) (*bls.SignatureSet, state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
|
||||
defer span.End()
|
||||
if err := helpers.VerifyNilBeaconBlock(signed); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(signed); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -258,7 +258,7 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
signedBeaconBlock block.SignedBeaconBlock) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessOperationsNoVerifyAttsSigs")
|
||||
defer span.End()
|
||||
if err := helpers.VerifyNilBeaconBlock(signedBeaconBlock); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(signedBeaconBlock); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -294,7 +294,7 @@ func ProcessBlockForStateRoot(
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockForStateRoot")
|
||||
defer span.End()
|
||||
if err := helpers.VerifyNilBeaconBlock(signed); err != nil {
|
||||
if err := helpers.BeaconBlockIsNil(signed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -39,14 +39,6 @@ type ReadOnlyDatabase interface {
|
||||
StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
|
||||
HasStateSummary(ctx context.Context, blockRoot [32]byte) bool
|
||||
HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]state.ReadOnlyBeaconState, error)
|
||||
// Slashing operations.
|
||||
ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.ProposerSlashing, error)
|
||||
AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.AttesterSlashing, error)
|
||||
HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool
|
||||
HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool
|
||||
// Block operations.
|
||||
VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*eth.VoluntaryExit, error)
|
||||
HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool
|
||||
// Checkpoint operations.
|
||||
JustifiedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
|
||||
FinalizedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
|
||||
@@ -75,11 +67,6 @@ type NoHeadAccessDatabase interface {
|
||||
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
|
||||
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
|
||||
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
|
||||
// Slashing operations.
|
||||
SaveProposerSlashing(ctx context.Context, slashing *eth.ProposerSlashing) error
|
||||
SaveAttesterSlashing(ctx context.Context, slashing *eth.AttesterSlashing) error
|
||||
// Block operations.
|
||||
SaveVoluntaryExit(ctx context.Context, exit *eth.VoluntaryExit) error
|
||||
// Checkpoint operations.
|
||||
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
|
||||
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
|
||||
@@ -160,7 +147,6 @@ type Database interface {
|
||||
io.Closer
|
||||
backup.BackupExporter
|
||||
HeadAccessDatabase
|
||||
|
||||
DatabasePath() string
|
||||
ClearDB() error
|
||||
}
|
||||
|
||||
@@ -13,15 +13,14 @@ go_library(
|
||||
"finalized_block_roots.go",
|
||||
"genesis.go",
|
||||
"kv.go",
|
||||
"light.go",
|
||||
"log.go",
|
||||
"migration.go",
|
||||
"migration_archived_index.go",
|
||||
"migration_block_slot_index.go",
|
||||
"migration_state_validators.go",
|
||||
"operations.go",
|
||||
"powchain.go",
|
||||
"schema.go",
|
||||
"slashings.go",
|
||||
"state.go",
|
||||
"state_summary.go",
|
||||
"state_summary_cache.go",
|
||||
@@ -88,9 +87,7 @@ go_test(
|
||||
"migration_archived_index_test.go",
|
||||
"migration_block_slot_index_test.go",
|
||||
"migration_state_validators_test.go",
|
||||
"operations_test.go",
|
||||
"powchain_test.go",
|
||||
"slashings_test.go",
|
||||
"state_summary_test.go",
|
||||
"state_test.go",
|
||||
"utils_test.go",
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -34,8 +34,8 @@ func (s *Store) Backup(ctx context.Context, outputDir string, permissionOverride
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if head == nil || head.IsNil() {
|
||||
return errors.New("no head block")
|
||||
if err := helpers.BeaconBlockIsNil(head); err != nil {
|
||||
return err
|
||||
}
|
||||
// Ensure the backups directory exists.
|
||||
if err := file.HandleBackupDir(backupsDir, permissionOverride); err != nil {
|
||||
|
||||
@@ -150,11 +150,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []bloc
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
|
||||
keys, err := blockRootsBySlot(ctx, tx, slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
keys := blockRootsBySlot(ctx, tx, slot)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
encoded := bkt.Get(keys[i])
|
||||
blk, err := unmarshalBlock(ctx, encoded)
|
||||
@@ -174,11 +170,7 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
|
||||
defer span.End()
|
||||
blockRoots := make([][32]byte, 0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
keys, err := blockRootsBySlot(ctx, tx, slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
keys := blockRootsBySlot(ctx, tx, slot)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
|
||||
}
|
||||
@@ -519,7 +511,7 @@ func blockRootsBySlotRange(
|
||||
}
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]byte, error) {
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
defer span.End()
|
||||
|
||||
@@ -533,7 +525,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]by
|
||||
roots = append(roots, v[i:i+32])
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
return roots
|
||||
}
|
||||
|
||||
// createBlockIndicesFromBlock takes in a beacon block and returns
|
||||
|
||||
@@ -3,8 +3,8 @@ package kv
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
@@ -84,8 +84,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if signedBlock == nil || signedBlock.IsNil() || signedBlock.Block().IsNil() {
|
||||
err := fmt.Errorf("missing block in database: block root=%#x", root)
|
||||
if err := helpers.BeaconBlockIsNil(signedBlock); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// VoluntaryExit retrieval by signing root.
|
||||
func (s *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.VoluntaryExit, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.VoluntaryExit")
|
||||
defer span.End()
|
||||
enc, err := s.voluntaryExitBytes(ctx, exitRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(enc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
exit := &ethpb.VoluntaryExit{}
|
||||
if err := decode(ctx, enc, exit); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exit, nil
|
||||
}
|
||||
|
||||
// HasVoluntaryExit verifies if a voluntary exit is stored in the db by its signing root.
|
||||
func (s *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasVoluntaryExit")
|
||||
defer span.End()
|
||||
enc, err := s.voluntaryExitBytes(ctx, exitRoot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return len(enc) > 0
|
||||
}
|
||||
|
||||
// SaveVoluntaryExit to the db by its signing root.
|
||||
func (s *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveVoluntaryExit")
|
||||
defer span.End()
|
||||
exitRoot, err := exit.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc, err := encode(ctx, exit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(voluntaryExitsBucket)
|
||||
return bucket.Put(exitRoot[:], enc)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.voluntaryExitBytes")
|
||||
defer span.End()
|
||||
var dst []byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(voluntaryExitsBucket)
|
||||
dst = bkt.Get(exitRoot[:])
|
||||
return nil
|
||||
})
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// deleteVoluntaryExit clears a voluntary exit from the db by its signing root.
|
||||
func (s *Store) deleteVoluntaryExit(ctx context.Context, exitRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteVoluntaryExit")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(voluntaryExitsBucket)
|
||||
return bucket.Delete(exitRoot[:])
|
||||
})
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestStore_VoluntaryExits_CRUD(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
exit := &ethpb.VoluntaryExit{
|
||||
Epoch: 5,
|
||||
}
|
||||
exitRoot, err := exit.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
retrieved, err := db.VoluntaryExit(ctx, exitRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (*ethpb.VoluntaryExit)(nil), retrieved, "Expected nil voluntary exit")
|
||||
require.NoError(t, db.SaveVoluntaryExit(ctx, exit))
|
||||
assert.Equal(t, true, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to exist in the db")
|
||||
retrieved, err = db.VoluntaryExit(ctx, exitRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(exit, retrieved), "Wanted %v, received %v", exit, retrieved)
|
||||
require.NoError(t, db.deleteVoluntaryExit(ctx, exitRoot))
|
||||
assert.Equal(t, false, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to have been deleted from the db")
|
||||
}
|
||||
@@ -1,147 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProposerSlashing retrieval by slashing root.
|
||||
func (s *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.ProposerSlashing, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.ProposerSlashing")
|
||||
defer span.End()
|
||||
enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(enc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
proposerSlashing := &ethpb.ProposerSlashing{}
|
||||
if err := decode(ctx, enc, proposerSlashing); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proposerSlashing, nil
|
||||
}
|
||||
|
||||
// HasProposerSlashing verifies if a slashing is stored in the db.
|
||||
func (s *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasProposerSlashing")
|
||||
defer span.End()
|
||||
enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return len(enc) > 0
|
||||
}
|
||||
|
||||
// SaveProposerSlashing to the db by its hash tree root.
|
||||
func (s *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveProposerSlashing")
|
||||
defer span.End()
|
||||
slashingRoot, err := slashing.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc, err := encode(ctx, slashing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(proposerSlashingsBucket)
|
||||
return bucket.Put(slashingRoot[:], enc)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.proposerSlashingBytes")
|
||||
defer span.End()
|
||||
var dst []byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(proposerSlashingsBucket)
|
||||
dst = bkt.Get(slashingRoot[:])
|
||||
return nil
|
||||
})
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// deleteProposerSlashing clears a proposer slashing from the db by its hash tree root.
|
||||
func (s *Store) deleteProposerSlashing(ctx context.Context, slashingRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteProposerSlashing")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(proposerSlashingsBucket)
|
||||
return bucket.Delete(slashingRoot[:])
|
||||
})
|
||||
}
|
||||
|
||||
// AttesterSlashing retrieval by hash tree root.
|
||||
func (s *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.AttesterSlashing, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AttesterSlashing")
|
||||
defer span.End()
|
||||
enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(enc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
attSlashing := &ethpb.AttesterSlashing{}
|
||||
if err := decode(ctx, enc, attSlashing); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return attSlashing, nil
|
||||
}
|
||||
|
||||
// HasAttesterSlashing verifies if a slashing is stored in the db.
|
||||
func (s *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttesterSlashing")
|
||||
defer span.End()
|
||||
enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return len(enc) > 0
|
||||
}
|
||||
|
||||
// SaveAttesterSlashing to the db by its hash tree root.
|
||||
func (s *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttesterSlashing")
|
||||
defer span.End()
|
||||
slashingRoot, err := slashing.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc, err := encode(ctx, slashing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(attesterSlashingsBucket)
|
||||
return bucket.Put(slashingRoot[:], enc)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.attesterSlashingBytes")
|
||||
defer span.End()
|
||||
var dst []byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(attesterSlashingsBucket)
|
||||
dst = bkt.Get(slashingRoot[:])
|
||||
return nil
|
||||
})
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// deleteAttesterSlashing clears an attester slashing from the db by its hash tree root.
|
||||
func (s *Store) deleteAttesterSlashing(ctx context.Context, slashingRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteAttesterSlashing")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(attesterSlashingsBucket)
|
||||
return bucket.Delete(slashingRoot[:])
|
||||
})
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestStore_ProposerSlashing_CRUD(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
prop := &ethpb.ProposerSlashing{
Header_1: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 5,
},
}),
Header_2: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 5,
},
}),
}
slashingRoot, err := prop.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
retrieved, err := db.ProposerSlashing(ctx, slashingRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (*ethpb.ProposerSlashing)(nil), retrieved, "Expected nil proposer slashing")
|
||||
require.NoError(t, db.SaveProposerSlashing(ctx, prop))
|
||||
assert.Equal(t, true, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to exist in the db")
|
||||
retrieved, err = db.ProposerSlashing(ctx, slashingRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(prop, retrieved), "Wanted %v, received %v", prop, retrieved)
|
||||
require.NoError(t, db.deleteProposerSlashing(ctx, slashingRoot))
|
||||
assert.Equal(t, false, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to have been deleted from the db")
|
||||
}
|
||||
|
||||
func TestStore_AttesterSlashing_CRUD(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
att := &ethpb.AttesterSlashing{
Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Slot: 5,
}}),
Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Slot: 7,
}})}
slashingRoot, err := att.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
retrieved, err := db.AttesterSlashing(ctx, slashingRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (*ethpb.AttesterSlashing)(nil), retrieved, "Expected nil attester slashing")
|
||||
require.NoError(t, db.SaveAttesterSlashing(ctx, att))
|
||||
assert.Equal(t, true, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to exist in the db")
|
||||
retrieved, err = db.AttesterSlashing(ctx, slashingRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(att, retrieved), "Wanted %v, received %v", att, retrieved)
|
||||
require.NoError(t, db.deleteAttesterSlashing(ctx, slashingRoot))
|
||||
assert.Equal(t, false, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to have been deleted from the db")
|
||||
}
|
||||
@@ -568,7 +568,7 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
 	if err != nil {
 		return 0, err
 	}
-	if err := helpers.VerifyNilBeaconBlock(wrapper.WrappedPhase0SignedBeaconBlock(b)); err != nil {
+	if err := helpers.BeaconBlockIsNil(wrapper.WrappedPhase0SignedBeaconBlock(b)); err != nil {
 		return 0, err
 	}
 	return b.Block.Slot, nil
@@ -6,7 +6,7 @@ go_library(
         "log.go",
         "service.go",
     ],
-    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/interop-cold-start",
+    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/deterministic-genesis",
     visibility = ["//beacon-chain:__subpackages__"],
     deps = [
         "//beacon-chain/cache/depositcache:go_default_library",
@@ -4,4 +4,4 @@ import (
 	"github.com/sirupsen/logrus"
 )

-var log = logrus.WithField("prefix", "interop-cold-start")
+var log = logrus.WithField("prefix", "deterministic-genesis")
@@ -1,4 +1,4 @@
-// Package interopcoldstart allows for spinning up a deterministic
+// Package interopcoldstart allows for spinning up a deterministic-genesis
 // local chain without the need for eth1 deposits useful for
 // local client development and interoperability testing.
 package interopcoldstart
32 beacon-chain/light/BUILD.bazel Normal file
@@ -0,0 +1,32 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"service.go",
|
||||
"update_comparison.go",
|
||||
"updater.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/light",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
209 beacon-chain/light/service.go Normal file
@@ -0,0 +1,209 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
syncSrv "github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
block2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
type UpdatesFetcher interface {
|
||||
BestUpdateForPeriod(ctx context.Context, period uint64) (*ethpb.LightClientUpdate, error)
|
||||
LatestFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate
|
||||
LatestNonFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
StateGen stategen.StateManager
|
||||
Database iface.Database
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
StateNotifier statefeed.Notifier
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker syncSrv.Checker
|
||||
}
|
||||
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
cancelFunc context.CancelFunc
|
||||
prevHeadData map[[32]byte]*ethpb.SyncAttestedData
|
||||
lock sync.RWMutex
|
||||
genesisTime time.Time
|
||||
finalizedByEpoch map[types.Epoch]*ethpb.LightClientFinalizedCheckpoint
|
||||
bestUpdateByPeriod map[uint64]*ethpb.LightClientUpdate
|
||||
latestFinalizedUpdate *ethpb.LightClientUpdate
|
||||
latestNonFinalizedUpdate *ethpb.LightClientUpdate
|
||||
}
|
||||
|
||||
// New creates a new light client update service with the given configuration.
|
||||
func New(ctx context.Context, cfg *Config) *Service {
|
||||
return &Service{
|
||||
cfg: cfg,
|
||||
prevHeadData: make(map[[32]byte]*ethpb.SyncAttestedData),
|
||||
finalizedByEpoch: make(map[types.Epoch]*ethpb.LightClientFinalizedCheckpoint),
|
||||
bestUpdateByPeriod: make(map[uint64]*ethpb.LightClientUpdate),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) Start() {
|
||||
go s.run()
|
||||
}
|
||||
|
||||
func (s *Service) Stop() error {
|
||||
s.cancelFunc()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) BestUpdateForPeriod(ctx context.Context, period uint64) (*ethpb.LightClientUpdate, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
update, ok := s.bestUpdateByPeriod[period]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no update found for period %d", period)
|
||||
}
|
||||
return update, nil
|
||||
}
|
||||
|
||||
func (s *Service) LatestFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
return s.latestFinalizedUpdate
|
||||
}
|
||||
|
||||
func (s *Service) LatestNonFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate {
s.lock.RLock()
defer s.lock.RUnlock()
return s.latestNonFinalizedUpdate
}
|
||||
|
||||
func (s *Service) run() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s.cancelFunc = cancel
|
||||
s.waitForChainInitialization(ctx)
|
||||
s.waitForSync(ctx)
|
||||
// Initialize the service from finalized (state, block) data.
|
||||
log.Info("Initializing from finalized data")
|
||||
if err := s.initializeFromFinalizedData(ctx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Info("Beginning subscriptions")
|
||||
// Begin listening for new chain head and finalized checkpoint events.
|
||||
go s.subscribeHeadEvent(ctx)
|
||||
go s.subscribeFinalizedEvent(ctx)
|
||||
}
|
||||
|
||||
func (s *Service) waitForChainInitialization(ctx context.Context) {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
defer close(stateChannel)
|
||||
for {
|
||||
select {
|
||||
case stateEvent := <-stateChannel:
|
||||
// Wait for us to receive the genesis time via a chain started notification.
|
||||
if stateEvent.Type == statefeed.Initialized {
|
||||
// Alternatively, if the chain has already started, we then read the genesis
|
||||
// time value from this data.
|
||||
data, ok := stateEvent.Data.(*statefeed.InitializedData)
|
||||
if !ok {
|
||||
log.Error(
|
||||
"Could not receive chain start notification, want *statefeed.ChainStartedData",
|
||||
)
|
||||
return
|
||||
}
|
||||
s.genesisTime = data.StartTime
|
||||
log.WithField("genesisTime", s.genesisTime).Info(
|
||||
"Received chain initialization event",
|
||||
)
|
||||
return
|
||||
}
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error(
|
||||
"Could not subscribe to state events",
|
||||
)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) waitForSync(ctx context.Context) {
|
||||
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
if slots.ToEpoch(slots.SinceGenesis(s.genesisTime)) < 6 {
|
||||
continue
|
||||
}
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) finalizedBlockOrGenesis(ctx context.Context, cpt *ethpb.Checkpoint) (block2.SignedBeaconBlock, error) {
|
||||
checkpointRoot := bytesutil.ToBytes32(cpt.Root)
|
||||
block, err := s.cfg.Database.Block(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if block == nil || block.IsNil() {
|
||||
return s.cfg.Database.GenesisBlock(ctx)
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func (s *Service) finalizedStateOrGenesis(ctx context.Context, cpt *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
checkpointRoot := bytesutil.ToBytes32(cpt.Root)
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
return s.cfg.Database.GenesisState(ctx)
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (s *Service) initializeFromFinalizedData(ctx context.Context) error {
|
||||
cpt, err := s.cfg.Database.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedBlock, err := s.finalizedBlockOrGenesis(ctx, cpt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedState, err := s.finalizedStateOrGenesis(ctx, cpt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc, err := finalizedState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := file.WriteFile("/tmp/state.ssz", enc); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.onFinalized(ctx, finalizedBlock, finalizedState)
|
||||
}
|
||||
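The UpdatesFetcher interface declared at the top of service.go is the surface other components would program against. A hedged sketch of a caller that asks for the best update of a given sync committee period and falls back to the latest finalized update when none is stored yet; the fetchUpdate helper is hypothetical, not part of this diff, and assumes the light package's existing imports:

// fetchUpdate is a hypothetical consumer of the UpdatesFetcher interface from
// service.go, shown only to illustrate intended use of its accessors.
func fetchUpdate(ctx context.Context, f UpdatesFetcher, period uint64) *ethpb.LightClientUpdate {
	update, err := f.BestUpdateForPeriod(ctx, period)
	if err == nil {
		return update
	}
	// Nothing recorded for that period yet; fall back to the most recent
	// finalized update the service has observed.
	return f.LatestFinalizedUpdate(ctx)
}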
120 beacon-chain/light/subscribe_finalized_event.go Normal file
@@ -0,0 +1,120 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
const (
|
||||
finalizedCheckpointStateIndex = 20
|
||||
nextSyncCommitteeStateIndex = 23
|
||||
)
|
||||
|
||||
func (s *Service) subscribeFinalizedEvent(ctx context.Context) {
|
||||
stateChan := make(chan *feed.Event, 1)
|
||||
sub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChan)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case ev := <-stateChan:
|
||||
if ev.Type == statefeed.FinalizedCheckpoint {
|
||||
blk, beaconState, err := s.parseFinalizedEvent(ctx, ev.Data)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if err := s.onFinalized(ctx, blk, beaconState); err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
			}
		case <-sub.Err():
			return
		case <-ctx.Done():
			return
		}
	}
}
|
||||
|
||||
func (s *Service) parseFinalizedEvent(
|
||||
ctx context.Context, eventData interface{},
|
||||
) (block.SignedBeaconBlock, state.BeaconState, error) {
|
||||
finalizedCheckpoint, ok := eventData.(*v1.EventFinalizedCheckpoint)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("expected finalized checkpoint event")
|
||||
}
|
||||
checkpointRoot := bytesutil.ToBytes32(finalizedCheckpoint.Block)
|
||||
blk, err := s.cfg.Database.Block(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
return nil, nil, errors.New("finalized block is nil")
}
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if st == nil || st.IsNil() {
return nil, nil, errors.New("finalized state is nil")
}
|
||||
return blk, st, nil
|
||||
}
|
||||
|
||||
func (s *Service) onFinalized(
|
||||
ctx context.Context, signedBlock block.SignedBeaconBlock, postState state.BeaconStateAltair,
|
||||
) error {
|
||||
if _, ok := postState.InnerStateUnsafe().(*ethpb.BeaconStateAltair); !ok {
|
||||
return errors.New("expected an Altair beacon state")
|
||||
}
|
||||
blk := signedBlock.Block()
|
||||
header, err := block.BeaconBlockHeaderFromBlockInterface(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tb, err := ssz.NewTreeBackedState(postState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proof, gIndex, err := tb.Proof(nextSyncCommitteeStateIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextSyncCommittee, err := postState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := postState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextSyncCommitteeRoot, err := nextSyncCommittee.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("On finalized update")
|
||||
log.Infof("Header state root %#x, state hash tree root %#x", header.StateRoot, root)
|
||||
log.Infof("Generating proof against root %#x with gindex %d and leaf root %#x", root, gIndex, nextSyncCommitteeRoot)
|
||||
log.Info("-----")
|
||||
log.Infof("Proof with length %d", len(proof))
|
||||
for _, elem := range proof {
|
||||
log.Infof("%#x", bytesutil.Trunc(elem))
|
||||
}
|
||||
log.Info("-----")
|
||||
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
currentEpoch := slots.ToEpoch(blk.Slot())
|
||||
s.finalizedByEpoch[currentEpoch] = ðpb.LightClientFinalizedCheckpoint{
|
||||
Header: header,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: proof,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
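onFinalized stores a Merkle proof of the next sync committee against the finalized state root (the gIndex logged above is the generalized index of that leaf). A light client consuming such an update verifies the branch by hashing the leaf back up to the root; a minimal standalone sketch of that check, following the spec's is_valid_merkle_branch and independent of Prysm's helpers:

package main

import (
	"crypto/sha256"
	"fmt"
)

// validMerkleBranch hashes leaf up the tree along branch, choosing left/right
// order from the low bits of the generalized index, and compares the result
// against root. This mirrors the beacon chain spec's is_valid_merkle_branch.
func validMerkleBranch(leaf [32]byte, branch [][32]byte, gIndex uint64, root [32]byte) bool {
	node := leaf
	for i, sibling := range branch {
		if (gIndex>>uint(i))&1 == 1 {
			node = sha256.Sum256(append(sibling[:], node[:]...))
		} else {
			node = sha256.Sum256(append(node[:], sibling[:]...))
		}
	}
	return node == root
}

func main() {
	// Toy two-level tree: root = H(H(a,b), H(c,d)); prove leaf c at generalized index 6.
	var a, b, c, d [32]byte
	a[0], b[0], c[0], d[0] = 1, 2, 3, 4
	ab := sha256.Sum256(append(a[:], b[:]...))
	cd := sha256.Sum256(append(c[:], d[:]...))
	root := sha256.Sum256(append(ab[:], cd[:]...))
	fmt.Println(validMerkleBranch(c, [][32]byte{d, ab}, 6, root)) // true
}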
154 beacon-chain/light/subscribe_head_event.go Normal file
@@ -0,0 +1,154 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) subscribeHeadEvent(ctx context.Context) {
|
||||
stateChan := make(chan *feed.Event, 1)
|
||||
sub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChan)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case ev := <-stateChan:
|
||||
if ev.Type == statefeed.NewHead {
|
||||
head, beaconState, err := s.getChainHeadAndState(ctx)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if err := s.onHead(ctx, head, beaconState); err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
case <-sub.Err():
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) getChainHeadAndState(ctx context.Context) (block.SignedBeaconBlock, state.BeaconState, error) {
|
||||
head, err := s.cfg.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if head == nil || head.IsNil() {
|
||||
return nil, nil, errors.New("head block is nil")
|
||||
}
|
||||
st, err := s.cfg.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, nil, err
}
if st == nil || st.IsNil() {
return nil, nil, errors.New("head state is nil")
}
|
||||
return head, st, nil
|
||||
}
|
||||
|
||||
func (s *Service) onHead(ctx context.Context, head block.SignedBeaconBlock, postState state.BeaconStateAltair) error {
|
||||
if _, ok := postState.InnerStateUnsafe().(*ethpb.BeaconStateAltair); !ok {
|
||||
return errors.New("expected an Altair beacon state")
|
||||
}
|
||||
blk := head.Block()
|
||||
tb, err := ssz.NewTreeBackedState(postState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
header, err := block.BeaconBlockHeaderFromBlockInterface(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalityBranch, _, err := tb.Proof(finalizedCheckpointStateIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextSyncCommitteeBranch, gIndex, err := tb.Proof(nextSyncCommitteeStateIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stRoot, err := postState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blkRoot, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("On head, generating sync committee proof for root %#x and index %d, block root %#x, header state root %#x", stRoot[:], gIndex, blkRoot, header.StateRoot)
|
||||
nextSyncCommittee, err := postState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
s.prevHeadData[blkRoot] = ðpb.SyncAttestedData{
|
||||
Header: header,
|
||||
FinalityCheckpoint: postState.FinalizedCheckpoint(),
|
||||
FinalityBranch: finalityBranch,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
}
|
||||
s.lock.Unlock()
|
||||
syncAttestedBlockRoot, err := helpers.BlockRootAtSlot(postState, postState.Slot()-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fork, err := forks.Fork(slots.ToEpoch(blk.Slot()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
syncAggregate, err := blk.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigData := &signatureData{
|
||||
slot: blk.Slot(),
|
||||
forkVersion: fork.CurrentVersion,
|
||||
syncAggregate: syncAggregate,
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
syncAttestedData, ok := s.prevHeadData[bytesutil.ToBytes32(syncAttestedBlockRoot)]
|
||||
if !ok {
|
||||
s.lock.Unlock()
|
||||
log.Info("Got useless data, skipping")
|
||||
return nil // Useless data.
|
||||
}
|
||||
s.lock.Unlock()
|
||||
committeePeriodWithFinalized, err := s.persistBestFinalizedUpdate(ctx, syncAttestedData, sigData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.persistBestNonFinalizedUpdate(ctx, syncAttestedData, sigData, committeePeriodWithFinalized); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
if len(s.prevHeadData) > PrevDataMaxSize {
|
||||
for k := range s.prevHeadData {
|
||||
delete(s.prevHeadData, k)
|
||||
if len(s.prevHeadData) <= PrevDataMaxSize {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
50 beacon-chain/light/update_comparison.go Normal file
@@ -0,0 +1,50 @@
package light

import (
	"bytes"

	"github.com/prysmaticlabs/prysm/config/params"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func isBetterUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
	prevIsFinalized := isFinalizedUpdate(prevUpdate)
	newIsFinalized := isFinalizedUpdate(newUpdate)
	// newUpdate becomes finalized, it's better.
	if newIsFinalized && !prevIsFinalized {
		return true
	}
	// newUpdate is no longer finalized, it's worse.
	if !newIsFinalized && prevIsFinalized {
		return false
	}
	return hasMoreBits(newUpdate, prevUpdate)
}

func isLatestBestFinalizedUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
	if newUpdate.FinalityHeader.Slot > prevUpdate.FinalityHeader.Slot {
		return true
	}
	if newUpdate.FinalityHeader.Slot < prevUpdate.FinalityHeader.Slot {
		return false
	}
	return hasMoreBits(newUpdate, prevUpdate)
}

func isLatestBestNonFinalizedUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
	if newUpdate.Header.Slot > prevUpdate.Header.Slot {
		return true
	}
	if newUpdate.Header.Slot < prevUpdate.Header.Slot {
		return false
	}
	return hasMoreBits(newUpdate, prevUpdate)
}

func isFinalizedUpdate(update *ethpb.LightClientUpdate) bool {
	return !bytes.Equal(params.BeaconConfig().ZeroHash[:], update.FinalityHeader.StateRoot)
}

func hasMoreBits(a *ethpb.LightClientUpdate, b *ethpb.LightClientUpdate) bool {
	return a.SyncCommitteeBits.Count() > b.SyncCommitteeBits.Count()
}
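The comparison helpers above encode a simple preference order: a finalized update always beats a non-finalized one, and ties are broken by how many sync committee bits signed the update. A minimal standalone sketch of that ordering, using simplified stand-in types rather than the ethpb.LightClientUpdate proto:

package main

import "fmt"

// update is a simplified stand-in for ethpb.LightClientUpdate, reduced to the
// two properties the comparison cares about.
type update struct {
	finalized bool // whether the update carries a finality proof
	bits      int  // number of sync committee bits set
}

// betterUpdate mirrors the isBetterUpdate ordering: finalized beats
// non-finalized, otherwise more participation bits wins.
func betterUpdate(prev, next update) bool {
	if next.finalized && !prev.finalized {
		return true
	}
	if !next.finalized && prev.finalized {
		return false
	}
	return next.bits > prev.bits
}

func main() {
	prev := update{finalized: false, bits: 400}
	next := update{finalized: true, bits: 350}
	fmt.Println(betterUpdate(prev, next)) // true: finality outweighs participation
}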
134 beacon-chain/light/updater.go Normal file
@@ -0,0 +1,134 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Precomputed values for generalized indices.
|
||||
const (
|
||||
FinalizedRootIndex = 105
|
||||
NextSyncCommitteeIndex = 55
|
||||
PrevDataMaxSize = 64
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "light")
|
||||
|
||||
type signatureData struct {
|
||||
slot types.Slot
|
||||
forkVersion []byte
|
||||
syncAggregate *ethpb.SyncAggregate
|
||||
}
|
||||
|
||||
func (s *Service) persistBestFinalizedUpdate(ctx context.Context, syncAttestedData *ethpb.SyncAttestedData, sigData *signatureData) (uint64, error) {
|
||||
finalizedEpoch := syncAttestedData.FinalityCheckpoint.Epoch
|
||||
|
||||
s.lock.RLock()
|
||||
finalizedData := s.finalizedByEpoch[finalizedEpoch]
|
||||
s.lock.RUnlock()
|
||||
|
||||
if finalizedData == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
committeePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(syncAttestedData.Header.Slot))
|
||||
signaturePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(sigData.slot))
|
||||
if committeePeriod != signaturePeriod {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
newUpdate := ðpb.LightClientUpdate{
|
||||
Header: finalizedData.Header,
|
||||
NextSyncCommittee: finalizedData.NextSyncCommittee,
|
||||
NextSyncCommitteeBranch: finalizedData.NextSyncCommitteeBranch,
|
||||
FinalityHeader: syncAttestedData.Header,
|
||||
FinalityBranch: syncAttestedData.FinalityBranch,
|
||||
SyncCommitteeBits: sigData.syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: sigData.syncAggregate.SyncCommitteeSignature,
|
||||
ForkVersion: sigData.forkVersion,
|
||||
}
|
||||
|
||||
s.lock.RLock()
|
||||
prevBestUpdate := s.bestUpdateByPeriod[committeePeriod]
|
||||
s.lock.RUnlock()
|
||||
|
||||
if prevBestUpdate == nil || isBetterUpdate(prevBestUpdate, newUpdate) {
|
||||
s.lock.Lock()
|
||||
s.bestUpdateByPeriod[committeePeriod] = newUpdate
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
s.lock.RLock()
|
||||
prevLatestUpdate := s.latestFinalizedUpdate
|
||||
s.lock.RUnlock()
|
||||
|
||||
if prevLatestUpdate == nil || isLatestBestFinalizedUpdate(prevLatestUpdate, newUpdate) {
|
||||
s.lock.Lock()
|
||||
s.latestFinalizedUpdate = newUpdate
|
||||
s.lock.Unlock()
|
||||
log.Info("Putting latest best finalized update")
|
||||
rt, err := newUpdate.NextSyncCommittee.HashTreeRoot()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
log.Infof("Header state root %#x, state hash tree root %#x", newUpdate.Header.StateRoot, newUpdate.Header.StateRoot)
|
||||
log.Infof("Generating proof against root %#x with gindex %d and leaf root %#x", newUpdate.Header.StateRoot, 55, rt)
|
||||
log.Info("-----")
|
||||
log.Infof("Proof with length %d", len(newUpdate.NextSyncCommitteeBranch))
|
||||
for _, elem := range newUpdate.NextSyncCommitteeBranch {
|
||||
log.Infof("%#x", bytesutil.Trunc(elem))
|
||||
}
|
||||
log.Info("-----")
|
||||
}
|
||||
return committeePeriod, nil
|
||||
}
|
||||
|
||||
func (s *Service) persistBestNonFinalizedUpdate(ctx context.Context, syncAttestedData *ethpb.SyncAttestedData, sigData *signatureData, period uint64) error {
|
||||
committeePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(syncAttestedData.Header.Slot))
|
||||
signaturePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(sigData.slot))
|
||||
if committeePeriod != signaturePeriod {
|
||||
return nil
|
||||
}
|
||||
|
||||
newUpdate := ðpb.LightClientUpdate{
|
||||
Header: syncAttestedData.Header,
|
||||
NextSyncCommittee: syncAttestedData.NextSyncCommittee,
|
||||
NextSyncCommitteeBranch: syncAttestedData.NextSyncCommitteeBranch,
|
||||
FinalityHeader: nil,
|
||||
FinalityBranch: nil,
|
||||
SyncCommitteeBits: sigData.syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: sigData.syncAggregate.SyncCommitteeSignature,
|
||||
ForkVersion: sigData.forkVersion,
|
||||
}
|
||||
|
||||
// Optimization: If there's already a finalized update for this committee period, no need to
|
||||
// create a non-finalized update.
|
||||
if committeePeriod != period {
|
||||
s.lock.RLock()
|
||||
prevBestUpdate := s.bestUpdateByPeriod[committeePeriod]
|
||||
s.lock.RUnlock()
|
||||
if prevBestUpdate == nil || isBetterUpdate(prevBestUpdate, newUpdate) {
|
||||
s.lock.Lock()
|
||||
s.bestUpdateByPeriod[committeePeriod] = newUpdate
|
||||
s.lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Store the latest overall update here, without checking whether it is the best one.
|
||||
s.lock.RLock()
|
||||
prevLatestUpdate := s.latestNonFinalizedUpdate
|
||||
s.lock.RUnlock()
|
||||
|
||||
if prevLatestUpdate == nil || isLatestBestNonFinalizedUpdate(prevLatestUpdate, newUpdate) {
|
||||
// TODO: Don't store nextCommittee, that can be fetched through getBestUpdates()
|
||||
s.lock.Lock()
|
||||
s.latestNonFinalizedUpdate = newUpdate
|
||||
s.lock.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
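persistBestFinalizedUpdate keys updates by sync committee period, derived via slots.SyncCommitteePeriod from the attested header's epoch. Under the Altair spec a period spans EPOCHS_PER_SYNC_COMMITTEE_PERIOD epochs (256 on mainnet), so the computation reduces to integer division; a small standalone sketch of that arithmetic, with the constant shown only for illustration:

package main

import "fmt"

// epochsPerSyncCommitteePeriod is the mainnet Altair constant
// EPOCHS_PER_SYNC_COMMITTEE_PERIOD, shown here only for illustration.
const epochsPerSyncCommitteePeriod = 256

// syncCommitteePeriod returns the sync committee period an epoch belongs to,
// matching how updates are bucketed in bestUpdateByPeriod.
func syncCommitteePeriod(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod
}

func main() {
	// An update attested in epoch 1000 and signed in epoch 1023 fall in the
	// same period (3), so persistBestFinalizedUpdate would consider it.
	fmt.Println(syncCommitteePeriod(1000), syncCommitteePeriod(1023)) // 3 3
	// Epoch 1024 starts period 4, so the periods no longer match.
	fmt.Println(syncCommitteePeriod(1024)) // 4
}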
58 beacon-chain/monitor/BUILD.bazel Normal file
@@ -0,0 +1,58 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"metrics.go",
|
||||
"process_attestation.go",
|
||||
"process_block.go",
|
||||
"process_exit.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"process_exit_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
7 beacon-chain/monitor/doc.go Normal file
@@ -0,0 +1,7 @@
/*
Package monitor defines a runtime service which receives notifications
triggered by events related to the performance of tracked validating keys.
It then logs and emits metrics so that a user can keep finely detailed
performance measures.
*/
package monitor
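The tests later in this diff construct the service with a ValidatorMonitorConfig whose TrackedValidators field is a set keyed by validator index. A hedged sketch of how such a tracked set might be assembled from a list of indices; the trackedSet helper below is hypothetical and not part of the package:

package monitor

import types "github.com/prysmaticlabs/eth2-types"

// trackedSet is a hypothetical helper (not part of this diff) that builds the
// map used as ValidatorMonitorConfig.TrackedValidators in the tests below.
// Only key membership matters; the empty interface value is unused.
func trackedSet(indices ...types.ValidatorIndex) map[types.ValidatorIndex]interface{} {
	tracked := make(map[types.ValidatorIndex]interface{}, len(indices))
	for _, idx := range indices {
		tracked[idx] = nil
	}
	return tracked
}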
70 beacon-chain/monitor/metrics.go Normal file
@@ -0,0 +1,70 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
log = logrus.WithField("prefix", "monitor")
|
||||
// TODO: The Prometheus gauge vectors and counters in this package deprecate the
|
||||
// corresponding gauge vectors and counters in the validator client.
|
||||
|
||||
// inclusionSlotGauge used to track attestation inclusion distance
|
||||
inclusionSlotGauge = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "inclusion_slot",
|
||||
Help: "Attestations inclusion slot",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
// timelyHeadCounter used to track attestation timely head flags
|
||||
timelyHeadCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "timely_head",
|
||||
Help: "Attestation timely Head flag",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
// timelyTargetCounter used to track attestation timely target flags
|
||||
timelyTargetCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "timely_target",
|
||||
Help: "Attestation timely Target flag",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
// timelySourceCounter used to track attestation timely source flags
|
||||
timelySourceCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "timely_source",
|
||||
Help: "Attestation timely Source flag",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
|
||||
// aggregationCounter used to track aggregations
|
||||
aggregationCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "aggregations",
|
||||
Help: "Number of aggregation duties performed",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
)
|
||||
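The inclusionSlotGauge above is fed in process_attestation.go with the slot at which a tracked validator's attestation landed on chain; the aggregated inclusion distance is then the gap between that slot and the attested slot. A small sketch of that bookkeeping, with the surrounding types simplified to only what the calculation needs:

package main

import "fmt"

// perf is a simplified stand-in for the latest-performance record, keeping only
// the two slots the inclusion-distance bookkeeping needs.
type perf struct {
	attestedSlot  uint64
	inclusionSlot uint64
}

// inclusionDistance mirrors the totalDistance accumulation in
// processIncludedAttestation: inclusion slot minus attested slot.
func inclusionDistance(p perf) uint64 {
	return p.inclusionSlot - p.attestedSlot
}

func main() {
	// An attestation for slot 1 included in the block at slot 2 has distance 1,
	// the best achievable value.
	fmt.Println(inclusionDistance(perf{attestedSlot: 1, inclusionSlot: 2}))
}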
218 beacon-chain/monitor/process_attestation.go Normal file
@@ -0,0 +1,218 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
|
||||
// given slot is different than the last attested slot from this validator.
|
||||
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
|
||||
if !s.TrackedIndex(types.ValidatorIndex(idx)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if lp, ok := s.latestPerformance[types.ValidatorIndex(idx)]; ok {
|
||||
return lp.attestedSlot != slot
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// attestingIndices returns the indices of validators that appear in the
|
||||
// given aggregated attestation.
|
||||
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
}
|
||||
|
||||
// logMessageTimelyFlagsForIndex returns the log message with the basic
|
||||
// performance indicators for the attestation (head, source, target)
|
||||
func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"ValidatorIndex": idx,
|
||||
"Slot": data.Slot,
|
||||
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
|
||||
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
|
||||
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
|
||||
}
|
||||
}
|
||||
|
||||
// processAttestations logs the event that one of our tracked validators'
|
||||
// attestations was included in a block
|
||||
func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) {
|
||||
if blk == nil || blk.Body() == nil {
|
||||
return
|
||||
}
|
||||
for _, attestation := range blk.Body().Attestations() {
|
||||
s.processIncludedAttestation(ctx, state, attestation)
|
||||
}
|
||||
}
|
||||
|
||||
// processIncludedAttestation logs when one of our tracked validators appears
// in the attesting indices and this attestation was not included before.
|
||||
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
|
||||
balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get balance")
|
||||
return
|
||||
}
|
||||
|
||||
aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)]
|
||||
aggregatedPerf.totalAttestedCount++
|
||||
aggregatedPerf.totalRequestedCount++
|
||||
|
||||
latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
|
||||
balanceChg := balance - latestPerf.balance
|
||||
latestPerf.balanceChange = balanceChg
|
||||
latestPerf.balance = balance
|
||||
latestPerf.attestedSlot = att.Data.Slot
|
||||
latestPerf.inclusionSlot = state.Slot()
|
||||
inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
|
||||
aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)
|
||||
|
||||
if state.Version() == version.Altair {
|
||||
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
sourceIdx := params.BeaconConfig().TimelySourceFlagIndex
|
||||
headIdx := params.BeaconConfig().TimelyHeadFlagIndex
|
||||
|
||||
var participation []byte
|
||||
if slots.ToEpoch(latestPerf.inclusionSlot) ==
|
||||
slots.ToEpoch(latestPerf.attestedSlot) {
|
||||
participation, err = state.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get current epoch participation")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
participation, err = state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get previous epoch participation")
|
||||
return
|
||||
}
|
||||
}
|
||||
flags := participation[idx]
|
||||
hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Source flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelySource = hasFlag
|
||||
hasFlag, err = altair.HasValidatorFlag(flags, headIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Head flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelyHead = hasFlag
|
||||
hasFlag, err = altair.HasValidatorFlag(flags, targetIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Target flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelyTarget = hasFlag
|
||||
|
||||
if latestPerf.timelySource {
|
||||
timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectSource++
|
||||
}
|
||||
if latestPerf.timelyHead {
|
||||
timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectHead++
|
||||
}
|
||||
if latestPerf.timelyTarget {
|
||||
timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectTarget++
|
||||
}
|
||||
}
|
||||
logFields["CorrectHead"] = latestPerf.timelyHead
|
||||
logFields["CorrectSource"] = latestPerf.timelySource
|
||||
logFields["CorrectTarget"] = latestPerf.timelyTarget
|
||||
logFields["InclusionSlot"] = latestPerf.inclusionSlot
|
||||
logFields["NewBalance"] = balance
|
||||
logFields["BalanceChange"] = balanceChg
|
||||
|
||||
s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf
|
||||
s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf
|
||||
log.WithFields(logFields).Info("Attestation included")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our
|
||||
// tracked validators
|
||||
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
|
||||
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.Debug("Skipping unaggregated attestation due to state not found in cache")
|
||||
return
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
|
||||
log.WithFields(logFields).Info("Processed unaggregated attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processAggregatedAttestation logs when we see an aggregation performed by one of our
// tracked validators, or an aggregated attestation that includes one of them.
|
||||
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
|
||||
if s.TrackedIndex(att.AggregatorIndex) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"ValidatorIndex": att.AggregatorIndex,
|
||||
}).Info("Processed attestation aggregation")
|
||||
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
|
||||
aggregatedPerf.totalAggregations++
|
||||
s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
|
||||
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
|
||||
}
|
||||
|
||||
var root [32]byte
|
||||
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.Debug("Skipping agregated attestation due to state not found in cache")
|
||||
return
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data)
|
||||
log.WithFields(logFields).Info("Processed aggregated attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
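processIncludedAttestation reads the per-validator participation byte and checks the timely source, target, and head bits via altair.HasValidatorFlag. The underlying operation is a single bit test, per the Altair spec's has_flag; a minimal sketch, with the mainnet flag indices shown only for illustration:

package main

import "fmt"

// hasFlag mirrors the Altair spec's has_flag: participation flags are a byte
// where bit i is set when the validator earned flag index i.
func hasFlag(flags uint8, flagIndex uint8) bool {
	return flags&(1<<flagIndex) != 0
}

func main() {
	// Mainnet flag indices: 0 = timely source, 1 = timely target, 2 = timely head.
	const flags = 0b011 // source and target earned, head missed
	fmt.Println(hasFlag(flags, 0), hasFlag(flags, 1), hasFlag(flags, 2)) // true true false
}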
287 beacon-chain/monitor/process_attestation_test.go Normal file
@@ -0,0 +1,287 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func setupService(t *testing.T) *Service {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
trackedVals := map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
12: nil,
|
||||
}
|
||||
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
|
||||
1: {
|
||||
balance: 32000000000,
|
||||
},
|
||||
2: {
|
||||
balance: 32000000000,
|
||||
},
|
||||
12: {
|
||||
balance: 31900000000,
|
||||
},
|
||||
}
|
||||
|
||||
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
|
||||
1: {},
|
||||
2: {},
|
||||
12: {},
|
||||
}
|
||||
|
||||
return &Service{
|
||||
config: &ValidatorMonitorConfig{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
TrackedValidators: trackedVals,
|
||||
},
|
||||
latestPerformance: latestPerformance,
|
||||
aggregatedPerformance: aggregatedPerformance,
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAttestingIndices(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 256)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, beaconState, att)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2})
|
||||
|
||||
}
|
||||
|
||||
func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processIncludedAttestation(context.Background(), state, att)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processUnaggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processUnaggregatedAttestation(context.Background(), att)
|
||||
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
},
|
||||
}
|
||||
s.processAggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateCached(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b10, 0b1},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processAggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
|
||||
}
|
||||
|
||||
func TestProcessAttestations(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
ctx := context.Background()
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
|
||||
block := ðpb.BeaconBlockAltair{
|
||||
Slot: 2,
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: []*ethpb.Attestation{att},
|
||||
},
|
||||
}
|
||||
|
||||
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
s.processAttestations(ctx, state, wrappedBlock)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
|
||||
}
|
||||
47 beacon-chain/monitor/process_block.go Normal file
@@ -0,0 +1,47 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// processSlashings logs when a slashing that involves one of our tracked validators was included in a block
|
||||
func (s *Service) processSlashings(blk block.BeaconBlock) {
|
||||
for _, slashing := range blk.Body().ProposerSlashings() {
|
||||
idx := slashing.Header_1.Header.ProposerIndex
|
||||
if s.TrackedIndex(idx) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"ProposerIndex": idx,
|
||||
"Slot:": blk.Slot(),
|
||||
"SlashingSlot": slashing.Header_1.Header.Slot,
|
||||
"Root1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
|
||||
"Root2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
|
||||
}).Info("Proposer slashing was included")
|
||||
}
|
||||
}
|
||||
|
||||
for _, slashing := range blk.Body().AttesterSlashings() {
|
||||
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
|
||||
if s.TrackedIndex(types.ValidatorIndex(idx)) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"AttesterIndex": idx,
|
||||
"Slot:": blk.Slot(),
|
||||
"Slot1": slashing.Attestation_1.Data.Slot,
|
||||
"Root1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
|
||||
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
|
||||
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
|
||||
"Slot2": slashing.Attestation_2.Data.Slot,
|
||||
"Root2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
|
||||
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
|
||||
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
|
||||
}).Info("Attester slashing was included")
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
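blocks.SlashableAttesterIndices used above yields the validators present in both conflicting attestations, since only that intersection is actually slashed. A standalone sketch of the intersection, independent of Prysm's helper and assuming sorted inputs as indexed attestations require:

package main

import "fmt"

// slashableIndices returns the indices present in both attesting-index lists,
// which is the set of validators slashed by an attester slashing.
// Assumes both inputs are sorted ascending.
func slashableIndices(a, b []uint64) []uint64 {
	var out []uint64
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	// Matches the "Attester slashing a tracked index" test case below:
	// indices {1, 3, 4} and {1, 5, 6} intersect in {1}.
	fmt.Println(slashableIndices([]uint64{1, 3, 4}, []uint64{1, 5, 6})) // [1]
}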
131 beacon-chain/monitor/process_block_test.go Normal file
@@ -0,0 +1,131 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessSlashings(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
block *ethpb.BeaconBlock
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "Proposer slashing a tracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{
|
||||
{
|
||||
Header_1: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 2,
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
},
|
||||
},
|
||||
Header_2: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 2,
|
||||
Slot: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "\"Proposer slashing was included\" ProposerIndex=2",
|
||||
},
|
||||
{
|
||||
name: "Proposer slashing an untracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{
|
||||
{
|
||||
Header_1: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 3,
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch + 4,
|
||||
},
|
||||
},
|
||||
Header_2: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 3,
|
||||
Slot: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "",
|
||||
},
|
||||
{
|
||||
name: "Attester slashing a tracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1, 5, 6},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "\"Attester slashing was included\" AttesterIndex=1",
|
||||
},
|
||||
{
|
||||
name: "Attester slashing untracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{3, 5, 6},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "",
|
||||
}}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{
|
||||
config: &ValidatorMonitorConfig{
|
||||
TrackedValidators: map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block))
|
||||
if tt.wantedErr != "" {
|
||||
require.LogsContain(t, hook, tt.wantedErr)
|
||||
} else {
|
||||
require.LogsDoNotContain(t, hook, "slashing")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
31  beacon-chain/monitor/process_exit.go  Normal file
@@ -0,0 +1,31 @@
package monitor

import (
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/sirupsen/logrus"
)

// processExitsFromBlock logs when a voluntary exit for one of our tracked
// validators is included in a block.
func (s *Service) processExitsFromBlock(blk block.BeaconBlock) {
    for _, exit := range blk.Body().VoluntaryExits() {
        idx := exit.Exit.ValidatorIndex
        if s.TrackedIndex(idx) {
            log.WithFields(logrus.Fields{
                "ValidatorIndex": idx,
                "Slot":           blk.Slot(),
            }).Info("Voluntary exit was included")
        }
    }
}

// processExit logs when a voluntary exit for one of our tracked validators is processed.
func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
    idx := exit.Exit.ValidatorIndex
    if s.TrackedIndex(idx) {
        log.WithFields(logrus.Fields{
            "ValidatorIndex": idx,
        }).Info("Voluntary exit was processed")
    }
}
126  beacon-chain/monitor/process_exit_test.go  Normal file
@@ -0,0 +1,126 @@
package monitor

import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/testing/require"
    logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
    hook := logTest.NewGlobal()
    s := &Service{
        config: &ValidatorMonitorConfig{
            TrackedValidators: map[types.ValidatorIndex]interface{}{
                1: nil,
                2: nil,
            },
        },
    }

    exits := []*ethpb.SignedVoluntaryExit{
        {
            Exit: &ethpb.VoluntaryExit{
                ValidatorIndex: 3,
                Epoch:          1,
            },
        },
        {
            Exit: &ethpb.VoluntaryExit{
                ValidatorIndex: 2,
                Epoch:          0,
            },
        },
    }

    block := &ethpb.BeaconBlock{
        Body: &ethpb.BeaconBlockBody{
            VoluntaryExits: exits,
        },
    }

    s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
    require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
}

func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
    hook := logTest.NewGlobal()
    s := &Service{
        config: &ValidatorMonitorConfig{
            TrackedValidators: map[types.ValidatorIndex]interface{}{
                1: nil,
                2: nil,
            },
        },
    }

    exits := []*ethpb.SignedVoluntaryExit{
        {
            Exit: &ethpb.VoluntaryExit{
                ValidatorIndex: 3,
                Epoch:          1,
            },
        },
        {
            Exit: &ethpb.VoluntaryExit{
                ValidatorIndex: 4,
                Epoch:          0,
            },
        },
    }

    block := &ethpb.BeaconBlock{
        Body: &ethpb.BeaconBlockBody{
            VoluntaryExits: exits,
        },
    }

    s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
    require.LogsDoNotContain(t, hook, "\"Voluntary exit was included\"")
}

func TestProcessExitP2PTrackedIndices(t *testing.T) {
    hook := logTest.NewGlobal()
    s := &Service{
        config: &ValidatorMonitorConfig{
            TrackedValidators: map[types.ValidatorIndex]interface{}{
                1: nil,
                2: nil,
            },
        },
    }

    exit := &ethpb.SignedVoluntaryExit{
        Exit: &ethpb.VoluntaryExit{
            ValidatorIndex: 1,
            Epoch:          1,
        },
        Signature: make([]byte, 96),
    }
    s.processExit(exit)
    require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
}

func TestProcessExitP2PUntrackedIndices(t *testing.T) {
    hook := logTest.NewGlobal()
    s := &Service{
        config: &ValidatorMonitorConfig{
            TrackedValidators: map[types.ValidatorIndex]interface{}{
                1: nil,
                2: nil,
            },
        },
    }

    exit := &ethpb.SignedVoluntaryExit{
        Exit: &ethpb.VoluntaryExit{
            ValidatorIndex: 3,
            Epoch:          1,
        },
    }
    s.processExit(exit)
    require.LogsDoNotContain(t, hook, "\"Voluntary exit was processed\"")
}
52  beacon-chain/monitor/service.go  Normal file
@@ -0,0 +1,52 @@
package monitor

import (
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

// ValidatorLatestPerformance keeps track of the latest participation of the validator.
type ValidatorLatestPerformance struct {
    attestedSlot  types.Slot
    inclusionSlot types.Slot
    timelySource  bool
    timelyTarget  bool
    timelyHead    bool
    balance       uint64
    balanceChange uint64
}

// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch.
type ValidatorAggregatedPerformance struct {
    totalAttestedCount  uint64
    totalRequestedCount uint64
    totalDistance       uint64
    totalCorrectSource  uint64
    totalCorrectTarget  uint64
    totalCorrectHead    uint64
    totalAggregations   uint64
}

// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifier that the
// monitor needs to subscribe to.
type ValidatorMonitorConfig struct {
    StateGen          stategen.StateManager
    TrackedValidators map[types.ValidatorIndex]interface{}
}

// Service is the main structure that tracks validators and reports logs and
// metrics of their performance throughout their lifetime.
type Service struct {
    config                *ValidatorMonitorConfig
    latestPerformance     map[types.ValidatorIndex]ValidatorLatestPerformance
    aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance
}

// TrackedIndex returns true if the given validator index corresponds to one of
// the validators we follow.
func (s *Service) TrackedIndex(idx types.ValidatorIndex) bool {
    _, ok := s.config.TrackedValidators[idx]
    return ok
}
21  beacon-chain/monitor/service_test.go  Normal file
@@ -0,0 +1,21 @@
package monitor

import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/testing/require"
)

func TestTrackedIndex(t *testing.T) {
    s := &Service{
        config: &ValidatorMonitorConfig{
            TrackedValidators: map[types.ValidatorIndex]interface{}{
                1: nil,
                2: nil,
            },
        },
    }
    require.Equal(t, s.TrackedIndex(types.ValidatorIndex(1)), true)
    require.Equal(t, s.TrackedIndex(types.ValidatorIndex(3)), false)
}
@@ -6,6 +6,7 @@ go_library(
        "config.go",
        "log.go",
        "node.go",
        "options.go",
        "prometheus.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/node",
@@ -18,14 +19,14 @@ go_library(
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/db/slasherkv:go_default_library",
        "//beacon-chain/deterministic-genesis:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/protoarray:go_default_library",
        "//beacon-chain/gateway:go_default_library",
        "//beacon-chain/interop-cold-start:go_default_library",
        "//beacon-chain/light:go_default_library",
        "//beacon-chain/node/registration:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
@@ -36,6 +37,7 @@ go_library(
        "//beacon-chain/rpc:go_default_library",
        "//beacon-chain/rpc/apimiddleware:go_default_library",
        "//beacon-chain/slasher:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//beacon-chain/sync/initial-sync:go_default_library",
@@ -44,6 +46,7 @@ go_library(
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//container/slice:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//monitoring/backup:go_default_library",
        "//monitoring/prometheus:go_default_library",
        "//monitoring/tracing:go_default_library",
@@ -75,6 +78,7 @@ go_test(
        "//config/params:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
@@ -1,6 +1,7 @@
package node

import (
    "github.com/ethereum/go-ethereum/common"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/cmd"
    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -95,3 +96,26 @@ func configureInteropConfig(cliCtx *cli.Context) {
        params.OverrideBeaconConfig(bCfg)
    }
}

func configureExecutionSetting(cliCtx *cli.Context) {
    if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
        c := params.BeaconConfig()
        c.TerminalTotalDifficulty = cliCtx.Uint64(flags.TerminalTotalDifficultyOverride.Name)
        params.OverrideBeaconConfig(c)
    }
    if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
        c := params.BeaconConfig()
        c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
        params.OverrideBeaconConfig(c)
    }
    if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
        c := params.BeaconConfig()
        c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
        params.OverrideBeaconConfig(c)
    }
    if cliCtx.IsSet(flags.Coinbase.Name) {
        c := params.BeaconConfig()
        c.Coinbase = common.HexToAddress(cliCtx.String(flags.Coinbase.Name))
        params.OverrideBeaconConfig(c)
    }
}
@@ -6,6 +6,7 @@ import (
    "strconv"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/cmd"
    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -70,6 +71,29 @@ func TestConfigureProofOfWork(t *testing.T) {
    assert.Equal(t, "deposit-contract", params.BeaconConfig().DepositContractAddress)
}

func TestConfigureExecutionSetting(t *testing.T) {
    params.SetupTestConfigCleanup(t)

    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.Uint64(flags.TerminalTotalDifficultyOverride.Name, 0, "")
    set.String(flags.TerminalBlockHashOverride.Name, "", "")
    set.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name, 0, "")
    set.String(flags.Coinbase.Name, "", "")
    require.NoError(t, set.Set(flags.TerminalTotalDifficultyOverride.Name, strconv.Itoa(100)))
    require.NoError(t, set.Set(flags.TerminalBlockHashOverride.Name, "0xA"))
    require.NoError(t, set.Set(flags.TerminalBlockHashActivationEpochOverride.Name, strconv.Itoa(200)))
    require.NoError(t, set.Set(flags.Coinbase.Name, "0xB"))
    cliCtx := cli.NewContext(&app, set, nil)

    configureExecutionSetting(cliCtx)

    assert.Equal(t, uint64(100), params.BeaconConfig().TerminalTotalDifficulty)
    assert.Equal(t, common.HexToHash("0xA"), params.BeaconConfig().TerminalBlockHash)
    assert.Equal(t, types.Epoch(200), params.BeaconConfig().TerminalBlockHashActivationEpoch)
    assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().Coinbase)
}

func TestConfigureNetwork(t *testing.T) {
    params.SetupTestConfigCleanup(t)
@@ -21,14 +21,14 @@ import (
    "github.com/prysmaticlabs/prysm/async/event"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/slasherkv"
    interopcoldstart "github.com/prysmaticlabs/prysm/beacon-chain/deterministic-genesis"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/gateway"
    interopcoldstart "github.com/prysmaticlabs/prysm/beacon-chain/interop-cold-start"
    "github.com/prysmaticlabs/prysm/beacon-chain/light"
    "github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
@@ -39,6 +39,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/rpc"
    "github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
    "github.com/prysmaticlabs/prysm/beacon-chain/slasher"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
    regularsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
    initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
@@ -47,6 +48,7 @@ import (
    "github.com/prysmaticlabs/prysm/config/features"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/container/slice"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/monitoring/backup"
    "github.com/prysmaticlabs/prysm/monitoring/prometheus"
    "github.com/prysmaticlabs/prysm/runtime"
@@ -62,6 +64,14 @@ const testSkipPowFlag = "test-skip-pow"
// 128MB max message size when enabling debug endpoints.
const debugGrpcMaxMsgSize = 1 << 27

// Used as a struct to keep cli flag options for configuring services
// for the beacon node. We keep this as a separate struct to not pollute the actual BeaconNode
// struct, as it is merely used to pass down configuration options into the appropriate services.
type serviceFlagOpts struct {
    blockchainFlagOpts []blockchain.Option
    powchainFlagOpts   []powchain.Option
}

// BeaconNode defines a struct that handles the services running a random beacon chain
// full PoS node. It handles the lifecycle of the entire system and registers
// services to a service registry.
@@ -87,11 +97,13 @@ type BeaconNode struct {
    collector               *bcnodeCollector
    slasherBlockHeadersFeed *event.Feed
    slasherAttestationsFeed *event.Feed
    finalizedStateAtStartUp state.BeaconState
    serviceFlagOpts         *serviceFlagOpts
}

// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context) (*BeaconNode, error) {
func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
    if err := configureTracing(cliCtx); err != nil {
        return nil, err
    }
@@ -127,9 +139,16 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
        syncCommitteePool:       synccommittee.NewPool(),
        slasherBlockHeadersFeed: new(event.Feed),
        slasherAttestationsFeed: new(event.Feed),
        serviceFlagOpts:         &serviceFlagOpts{},
    }

    depositAddress, err := registration.DepositContractAddress()
    for _, opt := range opts {
        if err := opt(beacon); err != nil {
            return nil, err
        }
    }

    depositAddress, err := powchain.DepositContractAddress()
    if err != nil {
        return nil, err
    }
@@ -141,7 +160,9 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
        return nil, err
    }

    beacon.startStateGen()
    if err := beacon.startStateGen(); err != nil {
        return nil, err
    }

    if err := beacon.registerP2P(cliCtx); err != nil {
        return nil, err
@@ -155,7 +176,7 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
        return nil, err
    }

    if err := beacon.registerInteropServices(); err != nil {
    if err := beacon.registerDeterminsticGenesisService(); err != nil {
        return nil, err
    }

@@ -173,6 +194,10 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
        return nil, err
    }

    if err := beacon.registerLightClientServer(); err != nil {
        return nil, err
    }

    if err := beacon.registerSlasherService(); err != nil {
        return nil, err
    }
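With the constructor now accepting variadic options, callers pass service configuration programmatically instead of threading it through package-level state. A minimal sketch of how a caller might wire the new signature together; the package name, startNode, and the option-slice variables are placeholders for values the real entrypoint would build from CLI flags:

package nodewiring

import (
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/node"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
    "github.com/urfave/cli/v2"
)

// startNode constructs a BeaconNode with flag-derived option slices. The
// slices would normally be assembled from CLI flags before reaching here.
func startNode(cliCtx *cli.Context, blockchainOpts []blockchain.Option, powchainOpts []powchain.Option) error {
    bn, err := node.New(cliCtx,
        node.WithBlockchainFlagOptions(blockchainOpts),
        node.WithPowchainFlagOptions(powchainOpts),
    )
    if err != nil {
        return err
    }
    _ = bn // a real entrypoint would go on to start the registered services
    return nil
}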
@@ -414,8 +439,34 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
    return nil
}

func (b *BeaconNode) startStateGen() {
func (b *BeaconNode) startStateGen() error {
    b.stateGen = stategen.New(b.db)

    cp, err := b.db.FinalizedCheckpoint(b.ctx)
    if err != nil {
        return err
    }

    r := bytesutil.ToBytes32(cp.Root)
    // Consider the edge case where the finalized root is zeros instead of the genesis root hash.
    if r == params.BeaconConfig().ZeroHash {
        genesisBlock, err := b.db.GenesisBlock(b.ctx)
        if err != nil {
            return err
        }
        if genesisBlock != nil && !genesisBlock.IsNil() {
            r, err = genesisBlock.Block().HashTreeRoot()
            if err != nil {
                return err
            }
        }
    }

    b.finalizedStateAtStartUp, err = b.stateGen.StateByRoot(b.ctx, r)
    if err != nil {
        return err
    }
    return nil
}

func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
@@ -480,29 +531,24 @@ func (b *BeaconNode) registerBlockchainService() error {
        return err
    }

    wsp := b.cliCtx.String(flags.WeakSubjectivityCheckpt.Name)
    wsCheckpt, err := helpers.ParseWeakSubjectivityInputString(wsp)
    if err != nil {
        return err
    }

    maxRoutines := b.cliCtx.Int(cmd.MaxGoroutines.Name)
    blockchainService, err := blockchain.NewService(b.ctx, &blockchain.Config{
        BeaconDB:                b.db,
        DepositCache:            b.depositCache,
        ChainStartFetcher:       web3Service,
        AttPool:                 b.attestationPool,
        ExitPool:                b.exitPool,
        SlashingPool:            b.slashingsPool,
        P2p:                     b.fetchP2P(),
        MaxRoutines:             maxRoutines,
        StateNotifier:           b,
        ForkChoiceStore:         b.forkChoiceStore,
        AttService:              attService,
        StateGen:                b.stateGen,
        SlasherAttestationsFeed: b.slasherAttestationsFeed,
        WeakSubjectivityCheckpt: wsCheckpt,
    })
    // skipcq: CRT-D0001
    opts := append(
        b.serviceFlagOpts.blockchainFlagOpts,
        blockchain.WithDatabase(b.db),
        blockchain.WithDepositCache(b.depositCache),
        blockchain.WithChainStartFetcher(web3Service),
        blockchain.WithAttestationPool(b.attestationPool),
        blockchain.WithExitPool(b.exitPool),
        blockchain.WithSlashingPool(b.slashingsPool),
        blockchain.WithP2PBroadcaster(b.fetchP2P()),
        blockchain.WithStateNotifier(b),
        blockchain.WithForkChoiceStore(b.forkChoiceStore),
        blockchain.WithAttestationService(attService),
        blockchain.WithStateGen(b.stateGen),
        blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
        blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
    )
    blockchainService, err := blockchain.NewService(b.ctx, opts...)
    if err != nil {
        return errors.Wrap(err, "could not register blockchain service")
    }
@@ -513,29 +559,27 @@ func (b *BeaconNode) registerPOWChainService() error {
    if b.cliCtx.Bool(testSkipPowFlag) {
        return b.services.RegisterService(&powchain.Service{})
    }

    depAddress, endpoints, err := registration.PowchainPreregistration(b.cliCtx)
    if err != nil {
        return err
    }

    bs, err := powchain.NewPowchainCollector(b.ctx)
    if err != nil {
        return err
    }

    cfg := &powchain.Web3ServiceConfig{
        HttpEndpoints:          endpoints,
        DepositContract:        common.HexToAddress(depAddress),
        BeaconDB:               b.db,
        DepositCache:           b.depositCache,
        StateNotifier:          b,
        StateGen:               b.stateGen,
        Eth1HeaderReqLimit:     b.cliCtx.Uint64(flags.Eth1HeaderReqLimit.Name),
        BeaconNodeStatsUpdater: bs,
    depositContractAddr, err := powchain.DepositContractAddress()
    if err != nil {
        return err
    }

    web3Service, err := powchain.NewService(b.ctx, cfg)
    // skipcq: CRT-D0001
    opts := append(
        b.serviceFlagOpts.powchainFlagOpts,
        powchain.WithDepositContractAddress(common.HexToAddress(depositContractAddr)),
        powchain.WithDatabase(b.db),
        powchain.WithDepositCache(b.depositCache),
        powchain.WithStateNotifier(b),
        powchain.WithStateGen(b.stateGen),
        powchain.WithBeaconNodeStatsUpdater(bs),
        powchain.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
    )
    web3Service, err := powchain.NewService(b.ctx, opts...)
    if err != nil {
        return errors.Wrap(err, "could not register proof-of-work chain web3Service")
    }
@@ -559,24 +603,24 @@ func (b *BeaconNode) registerSyncService() error {
        return err
    }

    rs := regularsync.NewService(b.ctx, &regularsync.Config{
        DB:                      b.db,
        P2P:                     b.fetchP2P(),
        Chain:                   chainService,
        InitialSync:             initSync,
        StateNotifier:           b,
        BlockNotifier:           b,
        AttestationNotifier:     b,
        OperationNotifier:       b,
        AttPool:                 b.attestationPool,
        ExitPool:                b.exitPool,
        SlashingPool:            b.slashingsPool,
        SyncCommsPool:           b.syncCommitteePool,
        StateGen:                b.stateGen,
        SlasherAttestationsFeed: b.slasherAttestationsFeed,
        SlasherBlockHeadersFeed: b.slasherBlockHeadersFeed,
    })

    rs := regularsync.NewService(
        b.ctx,
        regularsync.WithDatabase(b.db),
        regularsync.WithP2P(b.fetchP2P()),
        regularsync.WithChainService(chainService),
        regularsync.WithInitialSync(initSync),
        regularsync.WithStateNotifier(b),
        regularsync.WithBlockNotifier(b),
        regularsync.WithAttestationNotifier(b),
        regularsync.WithOperationNotifier(b),
        regularsync.WithAttestationPool(b.attestationPool),
        regularsync.WithExitPool(b.exitPool),
        regularsync.WithSlashingPool(b.slashingsPool),
        regularsync.WithSyncCommsPool(b.syncCommitteePool),
        regularsync.WithStateGen(b.stateGen),
        regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
        regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
    )
    return b.services.RegisterService(rs)
}
@@ -642,6 +686,11 @@ func (b *BeaconNode) registerRPCService() error {
        return err
    }

    var lightService *light.Service
    if err := b.services.FetchService(&lightService); err != nil {
        return err
    }

    var slasherService *slasher.Service
    if features.Get().EnableSlasher {
        if err := b.services.FetchService(&slasherService); err != nil {
@@ -718,6 +767,7 @@ func (b *BeaconNode) registerRPCService() error {
        StateGen:                b.stateGen,
        EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
        MaxMsgSize:              maxMsgSize,
        LightUpdatesFetcher:     lightService,
    })

    return b.services.RegisterService(rpcService)
@@ -801,7 +851,7 @@ func (b *BeaconNode) registerGRPCGateway() error {
    return b.services.RegisterService(g)
}

func (b *BeaconNode) registerInteropServices() error {
func (b *BeaconNode) registerDeterminsticGenesisService() error {
    genesisTime := b.cliCtx.Uint64(flags.InteropGenesisTimeFlag.Name)
    genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
    genesisStatePath := b.cliCtx.String(flags.InteropGenesisStateFlag.Name)
@@ -815,7 +865,37 @@ func (b *BeaconNode) registerInteropServices() error {
            GenesisPath: genesisStatePath,
        })

        // Register genesis state as start-up state when in interop mode.
        // The start-up state gets reused across services.
        st, err := b.db.GenesisState(b.ctx)
        if err != nil {
            return err
        }
        b.finalizedStateAtStartUp = st

        return b.services.RegisterService(svc)
    }
    return nil
}

func (b *BeaconNode) registerLightClientServer() error {
    var chainService *blockchain.Service
    if err := b.services.FetchService(&chainService); err != nil {
        return err
    }
    var syncService *initialsync.Service
    if err := b.services.FetchService(&syncService); err != nil {
        return err
    }
    svc := light.New(b.ctx, &light.Config{
        Database:            b.db,
        StateGen:            b.stateGen,
        HeadFetcher:         chainService,
        FinalizationFetcher: chainService,
        StateNotifier:       b,
        TimeFetcher:         chainService,
        SyncChecker:         syncService,
    })

    return b.services.RegisterService(svc)
}
25  beacon-chain/node/options.go  Normal file
@@ -0,0 +1,25 @@
package node

import (
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)

// Option for beacon node configuration.
type Option func(bn *BeaconNode) error

// WithBlockchainFlagOptions includes functional options for the blockchain service related to CLI flags.
func WithBlockchainFlagOptions(opts []blockchain.Option) Option {
    return func(bn *BeaconNode) error {
        bn.serviceFlagOpts.blockchainFlagOpts = opts
        return nil
    }
}

// WithPowchainFlagOptions includes functional options for the powchain service related to CLI flags.
func WithPowchainFlagOptions(opts []powchain.Option) Option {
    return func(bn *BeaconNode) error {
        bn.serviceFlagOpts.powchainFlagOpts = opts
        return nil
    }
}
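The Option type gives callers a hook that runs inside New before any services are registered (see the opts loop in the constructor diff above). As an illustration only, a hypothetical option that follows the same pattern; it is not part of this change:

package node

// WithNoopForIllustration is a hypothetical option, shown only to demonstrate
// that any func(*BeaconNode) error satisfies Option and is applied by New
// before services are registered.
func WithNoopForIllustration() Option {
    return func(bn *BeaconNode) error {
        // Mutate bn here (for example, pre-populate serviceFlagOpts), then return nil.
        return nil
    }
}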
@@ -5,15 +5,12 @@ go_library(
    srcs = [
        "log.go",
        "p2p.go",
        "powchain.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/node/registration",
    visibility = ["//beacon-chain/node:__subpackages__"],
    deps = [
        "//cmd:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@in_gopkg_yaml_v2//:go_default_library",
@@ -22,18 +19,13 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = [
        "p2p_test.go",
        "powchain_test.go",
    ],
    srcs = ["p2p_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//cmd:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],
)
@@ -1,45 +0,0 @@
package registration

import (
    "errors"

    "github.com/ethereum/go-ethereum/common"
    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/urfave/cli/v2"
)

// PowchainPreregistration prepares data for powchain.Service's registration.
func PowchainPreregistration(cliCtx *cli.Context) (depositContractAddress string, endpoints []string, err error) {
    depositContractAddress, err = DepositContractAddress()
    if err != nil {
        return "", nil, err
    }

    if cliCtx.String(flags.HTTPWeb3ProviderFlag.Name) == "" && len(cliCtx.StringSlice(flags.FallbackWeb3ProviderFlag.Name)) == 0 {
        log.Error(
            "No ETH1 node specified to run with the beacon node. Please consider running your own Ethereum proof-of-work node for better uptime, security, and decentralization of Ethereum. Visit https://docs.prylabs.network/docs/prysm-usage/setup-eth1 for more information.",
        )
        log.Error(
            "You will need to specify --http-web3provider and/or --fallback-web3provider to attach an eth1 node to the prysm node. Without an eth1 node block proposals for your validator will be affected and the beacon node will not be able to initialize the genesis state.",
        )
    }

    endpoints = []string{cliCtx.String(flags.HTTPWeb3ProviderFlag.Name)}
    endpoints = append(endpoints, cliCtx.StringSlice(flags.FallbackWeb3ProviderFlag.Name)...)
    return
}

// DepositContractAddress returns the address of the deposit contract.
func DepositContractAddress() (string, error) {
    address := params.BeaconConfig().DepositContractAddress
    if address == "" {
        return "", errors.New("valid deposit contract is required")
    }

    if !common.IsHexAddress(address) {
        return "", errors.New("invalid deposit contract address given: " + address)
    }

    return address, nil
}
@@ -1,71 +0,0 @@
package registration

import (
    "flag"
    "testing"

    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/testing/assert"
    "github.com/prysmaticlabs/prysm/testing/require"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "github.com/urfave/cli/v2"
)

func TestPowchainPreregistration(t *testing.T) {
    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.String(flags.HTTPWeb3ProviderFlag.Name, "primary", "")
    fallback := cli.StringSlice{}
    err := fallback.Set("fallback1")
    require.NoError(t, err)
    err = fallback.Set("fallback2")
    require.NoError(t, err)
    set.Var(&fallback, flags.FallbackWeb3ProviderFlag.Name, "")
    ctx := cli.NewContext(&app, set, nil)

    address, endpoints, err := PowchainPreregistration(ctx)
    require.NoError(t, err)
    assert.Equal(t, params.BeaconConfig().DepositContractAddress, address)
    assert.DeepEqual(t, []string{"primary", "fallback1", "fallback2"}, endpoints)
}

func TestPowchainPreregistration_EmptyWeb3Provider(t *testing.T) {
    hook := logTest.NewGlobal()
    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.String(flags.HTTPWeb3ProviderFlag.Name, "", "")
    fallback := cli.StringSlice{}
    set.Var(&fallback, flags.FallbackWeb3ProviderFlag.Name, "")
    ctx := cli.NewContext(&app, set, nil)

    _, _, err := PowchainPreregistration(ctx)
    require.NoError(t, err)
    assert.LogsContain(t, hook, "No ETH1 node specified to run with the beacon node")
}

func TestDepositContractAddress_Ok(t *testing.T) {
    address, err := DepositContractAddress()
    require.NoError(t, err)
    assert.Equal(t, params.BeaconConfig().DepositContractAddress, address)
}

func TestDepositContractAddress_EmptyAddress(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.DepositContractAddress = ""
    params.OverrideBeaconConfig(config)

    _, err := DepositContractAddress()
    assert.ErrorContains(t, "valid deposit contract is required", err)
}

func TestDepositContractAddress_NotHexAddress(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.DepositContractAddress = "abc?!"
    params.OverrideBeaconConfig(config)

    _, err := DepositContractAddress()
    assert.ErrorContains(t, "invalid deposit contract address given", err)
}
@@ -74,7 +74,7 @@ func (s *Store) SyncCommitteeContributions(slot types.Slot) ([]*ethpb.SyncCommit

    item := s.contributionCache.RetrieveByKey(syncCommitteeKey(slot))
    if item == nil {
        return nil, nil
        return []*ethpb.SyncCommitteeContribution{}, nil
    }

    contributions, ok := item.Value.([]*ethpb.SyncCommitteeContribution)

@@ -36,11 +36,11 @@ func TestSyncCommitteeContributionCache_RoundTrip(t *testing.T) {

    conts, err := store.SyncCommitteeContributions(1)
    require.NoError(t, err)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{}, conts)

    conts, err = store.SyncCommitteeContributions(2)
    require.NoError(t, err)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{}, conts)

    conts, err = store.SyncCommitteeContributions(3)
    require.NoError(t, err)
@@ -73,10 +73,10 @@ func TestSyncCommitteeContributionCache_RoundTrip(t *testing.T) {
    // All the contributions should persist after get.
    conts, err = store.SyncCommitteeContributions(1)
    require.NoError(t, err)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{}, conts)
    conts, err = store.SyncCommitteeContributions(2)
    require.NoError(t, err)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
    require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{}, conts)

    conts, err = store.SyncCommitteeContributions(3)
    require.NoError(t, err)
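The change above makes SyncCommitteeContributions return an empty slice rather than nil on a cache miss, and the test expectations move with it. The distinction matters to callers that compare or serialize the result: both have length zero, but they are not deeply equal. A standalone Go illustration of that difference:

package main

import (
    "fmt"
    "reflect"
)

func main() {
    var missing []int  // nil slice, like the old cache-miss return
    present := []int{} // empty but non-nil, like the new cache-miss return

    fmt.Println(len(missing), len(present))          // 0 0
    fmt.Println(missing == nil, present == nil)      // true false
    fmt.Println(reflect.DeepEqual(missing, present)) // false
}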
Some files were not shown because too many files have changed in this diff.