mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 21:38:05 -05:00
Compare commits
368 Commits
db-scale
...
inc-defaul
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d6a3647e1e | ||
|
|
59b9519284 | ||
|
|
d455c6adb8 | ||
|
|
d4038fb752 | ||
|
|
56ab5872bb | ||
|
|
eb6d68a4b1 | ||
|
|
7920528ede | ||
|
|
1af3c07ec5 | ||
|
|
7279349ae2 | ||
|
|
2d60d04b57 | ||
|
|
ef8bc97d3e | ||
|
|
071f6de559 | ||
|
|
b697463da9 | ||
|
|
bfbf693660 | ||
|
|
ed07f4bd77 | ||
|
|
1922416cac | ||
|
|
a5dc40393d | ||
|
|
b238235210 | ||
|
|
73a6a85069 | ||
|
|
ac4a30588f | ||
|
|
3579551f15 | ||
|
|
956e7a7563 | ||
|
|
acd7e62cfb | ||
|
|
165f0667f0 | ||
|
|
49826ebe28 | ||
|
|
3ad6abd9c0 | ||
|
|
c61f54176d | ||
|
|
0165351db3 | ||
|
|
8f49167117 | ||
|
|
b4c27f64de | ||
|
|
e4ac5e74b7 | ||
|
|
0b9b635646 | ||
|
|
c8c1d04c07 | ||
|
|
601987faf2 | ||
|
|
a060d765b3 | ||
|
|
5550334956 | ||
|
|
002253bba3 | ||
|
|
c055642f79 | ||
|
|
c7d64c03ac | ||
|
|
131fb43ba7 | ||
|
|
cf0bd633f0 | ||
|
|
df8da80db8 | ||
|
|
dc527a3c80 | ||
|
|
94f80bd208 | ||
|
|
c6bfefa0bc | ||
|
|
3446eaa5f2 | ||
|
|
193866c731 | ||
|
|
b90ce1b60f | ||
|
|
8449d24ed0 | ||
|
|
b92b01f379 | ||
|
|
bcd180ee4d | ||
|
|
5f10e51a49 | ||
|
|
c4f03868ce | ||
|
|
d55c0d74dd | ||
|
|
807b71244b | ||
|
|
2744eba391 | ||
|
|
5507558678 | ||
|
|
8cecd4e8bf | ||
|
|
69a9388515 | ||
|
|
bd308b6d73 | ||
|
|
9abea200a5 | ||
|
|
730678bf21 | ||
|
|
0b1a777d62 | ||
|
|
5461c5b84f | ||
|
|
03e7acbf9f | ||
|
|
6cc88e6454 | ||
|
|
65cea17268 | ||
|
|
aa86c94a91 | ||
|
|
75bb25d515 | ||
|
|
1e845bc276 | ||
|
|
58f7e942f2 | ||
|
|
3f1e3cf82f | ||
|
|
60003c481b | ||
|
|
c1197d7881 | ||
|
|
693cc79cc9 | ||
|
|
23778959eb | ||
|
|
92278e2255 | ||
|
|
c58ce41b3b | ||
|
|
d3b09d1e9d | ||
|
|
27082e2cd2 | ||
|
|
12080727ea | ||
|
|
a731b8c0bc | ||
|
|
f6eed74500 | ||
|
|
329a4a600c | ||
|
|
7ce712bb5e | ||
|
|
17a43c1158 | ||
|
|
77b8b13eff | ||
|
|
b59484b285 | ||
|
|
1964fb8146 | ||
|
|
63825290cb | ||
|
|
1619d880d4 | ||
|
|
af2b858aa2 | ||
|
|
57a323f083 | ||
|
|
738f00129b | ||
|
|
3a03623094 | ||
|
|
cde58f6924 | ||
|
|
78fe712e53 | ||
|
|
40e5a5d796 | ||
|
|
87507cbfe2 | ||
|
|
b516cfd998 | ||
|
|
f98d1ce64b | ||
|
|
a103dd91c0 | ||
|
|
74fe2cc8d0 | ||
|
|
a4bbaac262 | ||
|
|
1af11885ee | ||
|
|
1437cb8982 | ||
|
|
0b559afe30 | ||
|
|
54915850a2 | ||
|
|
69618d157a | ||
|
|
fa750650ed | ||
|
|
7303985232 | ||
|
|
958dd9d783 | ||
|
|
a3f8ccd924 | ||
|
|
d64f6cb7a8 | ||
|
|
a9a75e0004 | ||
|
|
339540274b | ||
|
|
12ba8f3645 | ||
|
|
f3a7f399c0 | ||
|
|
6163e091a7 | ||
|
|
2fb4ddcbe7 | ||
|
|
1c2e463a30 | ||
|
|
01e9125761 | ||
|
|
02a088d93c | ||
|
|
8cadb2ac6f | ||
|
|
be722604f7 | ||
|
|
3bb2acfc7d | ||
|
|
7719356b69 | ||
|
|
75b9bdba7c | ||
|
|
525c818672 | ||
|
|
a55fdf8949 | ||
|
|
7f41b69281 | ||
|
|
c5189a6862 | ||
|
|
b4b976c28b | ||
|
|
ced24892a5 | ||
|
|
49f989e342 | ||
|
|
3003f08770 | ||
|
|
0232b5f8f5 | ||
|
|
90a15b2fbe | ||
|
|
0f58c9a925 | ||
|
|
6311cfd8ab | ||
|
|
6dcf47675b | ||
|
|
72aa782849 | ||
|
|
cc637bad4a | ||
|
|
f5719f8c8e | ||
|
|
f550a964f5 | ||
|
|
c5c039fd6b | ||
|
|
4d9947543f | ||
|
|
7370c42bae | ||
|
|
3c76cc3af5 | ||
|
|
28af5bc601 | ||
|
|
c9f299b50a | ||
|
|
9dfb385160 | ||
|
|
b1774efeb7 | ||
|
|
4b3a723166 | ||
|
|
50f253619e | ||
|
|
753afb4fb2 | ||
|
|
1835f54197 | ||
|
|
26a2311c82 | ||
|
|
9f419bee7d | ||
|
|
d04eaf8fa0 | ||
|
|
82ceb51548 | ||
|
|
6a2ef13b87 | ||
|
|
72a2dd004b | ||
|
|
1a0e16a48b | ||
|
|
bff5c1e1a9 | ||
|
|
3f5ce0cdca | ||
|
|
d51b52d432 | ||
|
|
7b5a821c81 | ||
|
|
f89265f2f7 | ||
|
|
c731b715ed | ||
|
|
2a68c69d8f | ||
|
|
a82fd7bf68 | ||
|
|
9c540627ab | ||
|
|
11f50453fc | ||
|
|
5f8b01ccda | ||
|
|
d800107927 | ||
|
|
d3d5cfca0b | ||
|
|
18ef760ee5 | ||
|
|
14e3e80df3 | ||
|
|
3bea0e7896 | ||
|
|
8455656597 | ||
|
|
af8bafd895 | ||
|
|
c538a6c068 | ||
|
|
d5eb8392b6 | ||
|
|
4906a0e6de | ||
|
|
168e06e607 | ||
|
|
cf18d5dd42 | ||
|
|
aeb6940935 | ||
|
|
4020a603b6 | ||
|
|
3a5cfab5f2 | ||
|
|
8e5ba13352 | ||
|
|
e7e2f7850d | ||
|
|
c8748260fd | ||
|
|
a70febbf56 | ||
|
|
55ce1ccc33 | ||
|
|
28e0dc5d09 | ||
|
|
2b0f74904e | ||
|
|
5cf976e492 | ||
|
|
80aa4f483c | ||
|
|
fb92d518f9 | ||
|
|
1774188a17 | ||
|
|
ea31f096b5 | ||
|
|
ea1698c1b3 | ||
|
|
0802c8fe66 | ||
|
|
4322185d80 | ||
|
|
cab0a4c9df | ||
|
|
8b2b109939 | ||
|
|
59be30e9f4 | ||
|
|
b27d535ade | ||
|
|
e83c9d5862 | ||
|
|
eef2122a9e | ||
|
|
6406afc6cf | ||
|
|
8a01d412f5 | ||
|
|
aed6e13498 | ||
|
|
26ec352bdc | ||
|
|
ce712a1a3e | ||
|
|
98470fdb13 | ||
|
|
ef906cf704 | ||
|
|
65ec437ce5 | ||
|
|
58733a9f40 | ||
|
|
762ea6dce1 | ||
|
|
eef1730f9e | ||
|
|
de7f7d783e | ||
|
|
5eafb80091 | ||
|
|
cb0c382bf3 | ||
|
|
15485f50c3 | ||
|
|
9ee00f09a1 | ||
|
|
72817a6d0e | ||
|
|
943dec525c | ||
|
|
d9799e6b6c | ||
|
|
f0fd29d367 | ||
|
|
22982e5221 | ||
|
|
5a505509c3 | ||
|
|
588658a649 | ||
|
|
ae31eed013 | ||
|
|
e26cde5e09 | ||
|
|
5cc201288d | ||
|
|
b4fa626aff | ||
|
|
498e5efed1 | ||
|
|
f563d00e79 | ||
|
|
ff50448ca2 | ||
|
|
26120e26cb | ||
|
|
8d610319bc | ||
|
|
288b38be8e | ||
|
|
af06bb9737 | ||
|
|
52f1190b17 | ||
|
|
7df01feb58 | ||
|
|
49c73e65a1 | ||
|
|
06c3306ac2 | ||
|
|
c3228cd5a7 | ||
|
|
8da8855ad5 | ||
|
|
4ca04dcc4b | ||
|
|
68ef8ae99a | ||
|
|
64c02c405b | ||
|
|
33d1ae0792 | ||
|
|
182bd615ac | ||
|
|
3c54eb1cf6 | ||
|
|
9166f40a04 | ||
|
|
98990ba280 | ||
|
|
9ace518539 | ||
|
|
0ea4b02b8b | ||
|
|
d73794ce72 | ||
|
|
2ad0ad5877 | ||
|
|
963acefe12 | ||
|
|
0ee85bcbab | ||
|
|
5fb0bcfacd | ||
|
|
f52a214cce | ||
|
|
88b94eae18 | ||
|
|
3d02addfe4 | ||
|
|
53f7030871 | ||
|
|
84335b0084 | ||
|
|
b1c2454658 | ||
|
|
29a8b6c08f | ||
|
|
3bce9df382 | ||
|
|
8ee3019954 | ||
|
|
ef1227b14c | ||
|
|
80b7f691ae | ||
|
|
c69bce5d84 | ||
|
|
ad06230291 | ||
|
|
a1a301ad5d | ||
|
|
d28ae62d02 | ||
|
|
d6a02833dc | ||
|
|
fade8aa7a9 | ||
|
|
8604832499 | ||
|
|
ceabee0198 | ||
|
|
ebedf481f8 | ||
|
|
9f27af43cb | ||
|
|
09690590f0 | ||
|
|
e3c8491177 | ||
|
|
bfc90e9abb | ||
|
|
d173a6e695 | ||
|
|
7f857ae23a | ||
|
|
23e39d3d64 | ||
|
|
ec65c442c4 | ||
|
|
b38b0186b8 | ||
|
|
4bf935928b | ||
|
|
acc4ee8d9d | ||
|
|
7b7ed87ad8 | ||
|
|
5ab88da183 | ||
|
|
d6338f6042 | ||
|
|
c84c6ab547 | ||
|
|
52d8a1646f | ||
|
|
3c61cc7d8a | ||
|
|
37ca409cc1 | ||
|
|
b381ad49b5 | ||
|
|
00c3a7dcaf | ||
|
|
d66edc9670 | ||
|
|
4b249607da | ||
|
|
02483ba89c | ||
|
|
9629c354f1 | ||
|
|
886332c8fa | ||
|
|
424c8f6b46 | ||
|
|
cee3b626f3 | ||
|
|
5569a68452 | ||
|
|
1eff00fb33 | ||
|
|
2bcda2e021 | ||
|
|
98fea2e94d | ||
|
|
e53be1acbe | ||
|
|
c9f5f8dad5 | ||
|
|
03bf463d0f | ||
|
|
afb49aaeb9 | ||
|
|
1fd65ad7fd | ||
|
|
b1df3b55b0 | ||
|
|
3767574c77 | ||
|
|
d3c97da4e1 | ||
|
|
1d216a8737 | ||
|
|
790bf03123 | ||
|
|
ab60b1c7b2 | ||
|
|
236a5c4167 | ||
|
|
5e2229ce9d | ||
|
|
6ffba5c769 | ||
|
|
3e61763bd7 | ||
|
|
23bdce2354 | ||
|
|
d94bf32dcf | ||
|
|
7cbef104b0 | ||
|
|
cd6d0d9cf1 | ||
|
|
afbe02697d | ||
|
|
2c921ec628 | ||
|
|
0e72938914 | ||
|
|
71d55d1cff | ||
|
|
d8aa0f8827 | ||
|
|
37bc407b56 | ||
|
|
5983d0a397 | ||
|
|
85faecf2ca | ||
|
|
f42227aa04 | ||
|
|
c9d5b4ba0e | ||
|
|
fed004686b | ||
|
|
4ae7513835 | ||
|
|
1d53fd2fd3 | ||
|
|
a2c1185032 | ||
|
|
448d62d6e3 | ||
|
|
4858de7875 | ||
|
|
cd1e3f2b3e | ||
|
|
6f20d17d15 | ||
|
|
94fd99f5cd | ||
|
|
d4a420ddfd | ||
|
|
838b19e985 | ||
|
|
788338a004 | ||
|
|
39c33b82ad | ||
|
|
905e0f4c1c | ||
|
|
0ade1f121d | ||
|
|
ee52f8dff3 | ||
|
|
50159c2e48 | ||
|
|
cae58bbbd8 | ||
|
|
9b37418761 | ||
|
|
a78cdf86cc | ||
|
|
1c4ea75a18 | ||
|
|
6f4c80531c |
18
.bazelrc
18
.bazelrc
@@ -62,20 +62,7 @@ build:llvm-asan --config=llvm
|
||||
build:llvm-asan --config=asan
|
||||
build:llvm-asan --linkopt -fuse-ld=ld.lld
|
||||
|
||||
build:fuzz --define=gotags=libfuzzer
|
||||
build:fuzz --config=llvm-asan
|
||||
build:fuzz --copt=-fsanitize=fuzzer-no-link
|
||||
build:fuzz --linkopt=-fsanitize=fuzzer
|
||||
build:fuzz --copt=-fno-omit-frame-pointer
|
||||
build:fuzz --define=FUZZING_ENGINE=libfuzzer
|
||||
build:fuzz --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
|
||||
build:fuzz --linkopt -Wl,--no-as-needed
|
||||
build:fuzz --define=gc_goopts=-d=libfuzzer,checkptr
|
||||
build:fuzz --run_under=//tools:fuzz_wrapper
|
||||
build:fuzz --compilation_mode=opt
|
||||
build:fuzz --define=blst_disabled=true
|
||||
|
||||
test:fuzz --local_test_jobs="HOST_CPUS*.5"
|
||||
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
|
||||
|
||||
# Build binary with cgo symbolizer for debugging / profiling.
|
||||
build:cgo_symbolizer --config=llvm
|
||||
@@ -230,3 +217,6 @@ build:remote --remote_local_fallback
|
||||
|
||||
# Ignore GoStdLib with remote caching
|
||||
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
|
||||
|
||||
# Set bazel gotag
|
||||
build --define gotags=bazel
|
||||
|
||||
@@ -1 +1 @@
|
||||
4.2.1
|
||||
5.0.0
|
||||
|
||||
@@ -10,40 +10,37 @@
|
||||
|
||||
# Prysm specific remote-cache properties.
|
||||
#build:remote-cache --disk_cache=
|
||||
build:remote-cache --remote_download_toplevel
|
||||
build:remote-cache --remote_download_minimal
|
||||
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --remote_local_fallback
|
||||
build:remote-cache --experimental_remote_cache_async
|
||||
build:remote-cache --experimental_remote_merkle_tree_cache
|
||||
build:remote-cache --experimental_action_cache_store_output_metadata
|
||||
build:remote-cache --experimental_remote_cache_compression
|
||||
# Enforce stricter environment rules, which eliminates some non-hermetic
|
||||
# behavior and therefore improves both the remote cache hit rate and the
|
||||
# correctness and repeatability of the build.
|
||||
build:remote-cache --incompatible_strict_action_env=true
|
||||
|
||||
build --experimental_use_hermetic_linux_sandbox
|
||||
|
||||
# Import workspace options.
|
||||
import %workspace%/.bazelrc
|
||||
|
||||
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
|
||||
query --repository_cache=/tmp/repositorycache
|
||||
query --experimental_repository_cache_hardlinks
|
||||
build --repository_cache=/tmp/repositorycache
|
||||
build --experimental_repository_cache_hardlinks
|
||||
startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
|
||||
build --experimental_strict_action_env
|
||||
build --disk_cache=/tmp/bazelbuilds
|
||||
build --experimental_multi_threaded_digest
|
||||
build --sandbox_tmpfs_path=/tmp
|
||||
build --verbose_failures
|
||||
build --announce_rc
|
||||
build --show_progress_rate_limit=5
|
||||
build --curses=yes --color=no
|
||||
build --curses=no --color=no
|
||||
build --keep_going
|
||||
build --test_output=errors
|
||||
build --flaky_test_attempts=5
|
||||
# Disabled race detection due to unstable test results under constrained environment build kite
|
||||
# build --features=race
|
||||
|
||||
# Disable flaky test detection for fuzzing.
|
||||
test:fuzz --flaky_test_attempts=1
|
||||
|
||||
# Better caching
|
||||
build:nostamp --nostamp
|
||||
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
|
||||
|
||||
8
.github/CODEOWNERS
vendored
8
.github/CODEOWNERS
vendored
@@ -6,3 +6,11 @@
|
||||
|
||||
# Anyone on prylabs team can approve dependency updates.
|
||||
deps.bzl @prysmaticlabs/core-team
|
||||
|
||||
# Radek and Nishant are responsible for changes that can affect the native state feature.
|
||||
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
|
||||
/beacon-chain/state/fieldtrie/ @rkapka @nisdas
|
||||
/beacon-chain/state/v1/ @rkapka @nisdas
|
||||
/beacon-chain/state/v2/ @rkapka @nisdas
|
||||
/beacon-chain/state/v3/ @rkapka @nisdas
|
||||
/beacon-chain/state/state-native/ @rkapka @nisdas
|
||||
|
||||
2
.github/actions/gomodtidy/entrypoint.sh
vendored
2
.github/actions/gomodtidy/entrypoint.sh
vendored
@@ -7,7 +7,7 @@ cd $GITHUB_WORKSPACE
|
||||
cp go.mod go.mod.orig
|
||||
cp go.sum go.sum.orig
|
||||
|
||||
go mod tidy
|
||||
go mod tidy -compat=1.17
|
||||
|
||||
echo "Checking go.mod and go.sum:"
|
||||
checks=0
|
||||
|
||||
2
.github/workflows/go.yml
vendored
2
.github/workflows/go.yml
vendored
@@ -39,7 +39,7 @@ jobs:
|
||||
- name: Golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2
|
||||
with:
|
||||
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go
|
||||
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto
|
||||
|
||||
build:
|
||||
name: Build
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -30,9 +30,5 @@ password.txt
|
||||
# Dist files
|
||||
dist
|
||||
|
||||
# libfuzzer
|
||||
oom-*
|
||||
crash-*
|
||||
|
||||
# deepsource cli
|
||||
bin
|
||||
|
||||
95
BUILD.bazel
95
BUILD.bazel
@@ -3,7 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
|
||||
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
|
||||
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "nogo")
|
||||
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
|
||||
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
|
||||
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
|
||||
|
||||
prefix = "github.com/prysmaticlabs/prysm"
|
||||
@@ -16,6 +16,8 @@ exports_files([
|
||||
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
|
||||
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
|
||||
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
|
||||
# gazelle:build_tags bazel
|
||||
# gazelle:exclude tools/analyzers/**/testdata/**
|
||||
gazelle(
|
||||
name = "gazelle",
|
||||
prefix = prefix,
|
||||
@@ -86,50 +88,52 @@ nogo(
|
||||
config = "nogo_config.json",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/unreachable:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/tests:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/structtag:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/shift:go_tool_library",
|
||||
# "@org_golang_x_tools//go/analysis/passes/shadow:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/printf:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/nilness:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/findcall:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/copylock:go_tool_library",
|
||||
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/buildtag:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/buildssa:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/bools:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/atomic:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/assign:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
|
||||
"//tools/analyzers/maligned:go_tool_library",
|
||||
"//tools/analyzers/cryptorand:go_tool_library",
|
||||
"//tools/analyzers/errcheck:go_tool_library",
|
||||
"//tools/analyzers/featureconfig:go_tool_library",
|
||||
"//tools/analyzers/comparesame:go_tool_library",
|
||||
"//tools/analyzers/shadowpredecl:go_tool_library",
|
||||
"//tools/analyzers/nop:go_tool_library",
|
||||
"//tools/analyzers/slicedirect:go_tool_library",
|
||||
"//tools/analyzers/interfacechecker:go_tool_library",
|
||||
"//tools/analyzers/ineffassign:go_tool_library",
|
||||
"//tools/analyzers/properpermissions:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
|
||||
# "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
|
||||
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
|
||||
"//tools/analyzers/maligned:go_default_library",
|
||||
"//tools/analyzers/cryptorand:go_default_library",
|
||||
"//tools/analyzers/errcheck:go_default_library",
|
||||
"//tools/analyzers/featureconfig:go_default_library",
|
||||
"//tools/analyzers/comparesame:go_default_library",
|
||||
"//tools/analyzers/shadowpredecl:go_default_library",
|
||||
"//tools/analyzers/nop:go_default_library",
|
||||
"//tools/analyzers/slicedirect:go_default_library",
|
||||
"//tools/analyzers/interfacechecker:go_default_library",
|
||||
"//tools/analyzers/ineffassign:go_default_library",
|
||||
"//tools/analyzers/properpermissions:go_default_library",
|
||||
"//tools/analyzers/recursivelock:go_default_library",
|
||||
"//tools/analyzers/uintcast:go_default_library",
|
||||
] + select({
|
||||
# nogo checks that fail with coverage enabled.
|
||||
":coverage_enabled": [],
|
||||
"//conditions:default": [
|
||||
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/composite:go_tool_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
|
||||
],
|
||||
}),
|
||||
)
|
||||
@@ -144,15 +148,6 @@ common_files = {
|
||||
"//:README.md": "README.md",
|
||||
}
|
||||
|
||||
string_setting(
|
||||
name = "gotags",
|
||||
build_setting_default = "",
|
||||
values = [
|
||||
"",
|
||||
"libfuzzer",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "prysm_sh",
|
||||
srcs = ["prysm.sh"],
|
||||
|
||||
@@ -190,7 +190,7 @@ Anyone can become a part-time contributor and help out on implementing Ethereum
|
||||
|
||||
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
|
||||
|
||||
- Stay up to date on the latest beacon chain sepcification
|
||||
- Stay up to date on the latest beacon chain specification
|
||||
- Monitor github issues and PR’s to make sure owner, labels, descriptions are correct
|
||||
- Formulate independent ideas, suggest new work to do, point out improvements to existing approaches
|
||||
- Participate in code review, ensure code quality is excellent, and have ensure high code coverage
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
[](https://buildkite.com/prysmatic-labs/prysm)
|
||||
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
|
||||
[](https://github.com/ethereum/consensus-specs/tree/v1.0.0)
|
||||
[](https://github.com/ethereum/consensus-specs/tree/v1.1.8)
|
||||
[](https://discord.gg/CTYGPUJ)
|
||||
|
||||
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
|
||||
|
||||
60
WORKSPACE
60
WORKSPACE
@@ -60,10 +60,10 @@ bazel_skylib_workspace()
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
|
||||
sha256 = "de69a09dc70417580aabf20a28619bb3ef60d038470c7cf8442fafcf627c21cb",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -85,16 +85,13 @@ http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
# Required until https://github.com/bazelbuild/rules_go/pull/2450 merges otherwise nilness
|
||||
# nogo check fails for certain third_party dependencies.
|
||||
"//third_party:io_bazel_rules_go.patch",
|
||||
# Expose internals of go_test for custom build transitions.
|
||||
"//third_party:io_bazel_rules_go_test.patch",
|
||||
],
|
||||
sha256 = "7c10271940c6bce577d51a075ae77728964db285dac0a46614a7934dc34303e6",
|
||||
sha256 = "2b1641428dff9018f9e85c0384f03ec6c10660d935b750e3fa1492a281a53b0f",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.29.0/rules_go-v0.29.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.29.0/rules_go-v0.29.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -120,13 +117,6 @@ http_archive(
|
||||
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
|
||||
)
|
||||
|
||||
git_repository(
|
||||
name = "graknlabs_bazel_distribution",
|
||||
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
|
||||
remote = "https://github.com/graknlabs/bazel-distribution",
|
||||
shallow_since = "1569509514 +0300",
|
||||
)
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_docker//repositories:repositories.bzl",
|
||||
container_repositories = "repositories",
|
||||
@@ -186,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.16.4",
|
||||
go_version = "1.17.6",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
@@ -222,10 +212,10 @@ filegroup(
|
||||
)
|
||||
""",
|
||||
sha256 = "91434d5fd5e1c6eb7b0174fed2afe25e09bddf00e1e4c431db931b2cee4e7773",
|
||||
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
|
||||
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.1.5"
|
||||
consensus_spec_version = "v1.1.10"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -241,7 +231,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "a7d7173d953494c0dfde432c9fc064c25d46d666b024749b3474ae0cdfc50050",
|
||||
sha256 = "28043009cc2f6fc9804e73c8c1fc2cb27062f1591e6884f3015ae1dd7a276883",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -257,7 +247,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "f86872061588c0197516b23025d39e9365b4716c112218a618739dc0d6f4666a",
|
||||
sha256 = "bc1a283ca068f310f04d70c4f6a8eaa0b8f7e9318073a8bdc2ee233111b4e339",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -273,7 +263,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "7a06975360fd37fbb4694d0e06abb78d2a0835146c1d9b26d33569edff8b98f0",
|
||||
sha256 = "bbabb482c229ff9d4e2c7b77c992edb452f9d0af7c6d8dd4f922f06a7b101e81",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -288,7 +278,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "87d8089200163340484d61212fbdffbb5d9d03e1244622761dcb91e641a65761",
|
||||
sha256 = "408a5524548ad3fcf387f65ac7ec52781d9ee899499720bb12451b48a15818d4",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
@@ -319,9 +309,9 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "9dc47bf6b14aed7fac8833e35ab83a69131b43fa5789b3256bf1ac3d4861aeb8",
|
||||
strip_prefix = "eth2-networks-7fa1b868985ee24aad65567f9250cf7fa86f97b1",
|
||||
url = "https://github.com/eth2-clients/eth2-networks/archive/7fa1b868985ee24aad65567f9250cf7fa86f97b1.tar.gz",
|
||||
sha256 = "4e8a18b21d056c4032605621b1a6632198eabab57cb90c61e273f344c287f1b2",
|
||||
strip_prefix = "eth2-networks-791a5369c5981e829698b17fbcdcdacbdaba97c8",
|
||||
url = "https://github.com/eth-clients/eth2-networks/archive/791a5369c5981e829698b17fbcdcdacbdaba97c8.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -341,16 +331,6 @@ git_repository(
|
||||
# Group the sources of the library so that CMake rule have access to it
|
||||
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
|
||||
|
||||
http_archive(
|
||||
name = "sigp_beacon_fuzz_corpora",
|
||||
build_file = "//third_party:beacon-fuzz/corpora.BUILD",
|
||||
sha256 = "42993d0901a316afda45b4ba6d53c7c21f30c551dcec290a4ca131c24453d1ef",
|
||||
strip_prefix = "beacon-fuzz-corpora-bac24ad78d45cc3664c0172241feac969c1ac29b",
|
||||
urls = [
|
||||
"https://github.com/sigp/beacon-fuzz-corpora/archive/bac24ad78d45cc3664c0172241feac969c1ac29b.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# External dependencies
|
||||
|
||||
http_archive(
|
||||
@@ -362,9 +342,9 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "0a3d94428ea28916276694c517b82b364122063fdbf924f54ee9ae0bc500289f",
|
||||
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
|
||||
urls = [
|
||||
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.1/prysm-web-ui.tar.gz",
|
||||
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -377,6 +357,10 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
|
||||
|
||||
bls_dependencies()
|
||||
|
||||
load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")
|
||||
|
||||
e2e_deps()
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_docker//go:image.bzl",
|
||||
_go_image_repos = "repositories",
|
||||
|
||||
54
api/client/beacon/BUILD.bazel
Normal file
54
api/client/beacon/BUILD.bazel
Normal file
@@ -0,0 +1,54 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"checkpoint.go",
|
||||
"client.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/api/client/beacon",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_x_mod//semver:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"checkpoint_test.go",
|
||||
"client_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
],
|
||||
)
|
||||
262
api/client/beacon/checkpoint.go
Normal file
262
api/client/beacon/checkpoint.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
|
||||
// using Checkpoint Sync.
|
||||
type OriginData struct {
|
||||
wsd *WeakSubjectivityData
|
||||
sb []byte
|
||||
bb []byte
|
||||
st state.BeaconState
|
||||
b block.SignedBeaconBlock
|
||||
cf *detect.VersionedUnmarshaler
|
||||
}
|
||||
|
||||
// CheckpointString returns the standard string representation of a Checkpoint for the block root and epoch for the
|
||||
// SignedBeaconBlock value found by DownloadOriginData.
|
||||
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
|
||||
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
|
||||
func (od *OriginData) CheckpointString() string {
|
||||
return fmt.Sprintf("%#x:%d", od.wsd.BlockRoot, od.wsd.Epoch)
|
||||
}
|
||||
|
||||
// SaveBlock saves the downloaded block to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (od *OriginData) SaveBlock(dir string) (string, error) {
|
||||
blockPath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.BlockRoot))
|
||||
return blockPath, file.WriteFile(blockPath, od.sb)
|
||||
}
|
||||
|
||||
// SaveState saves the downloaded state to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (od *OriginData) SaveState(dir string) (string, error) {
|
||||
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
|
||||
return statePath, file.WriteFile(statePath, od.sb)
|
||||
}
|
||||
|
||||
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
|
||||
func (od *OriginData) StateBytes() []byte {
|
||||
return od.sb
|
||||
}
|
||||
|
||||
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
|
||||
func (od *OriginData) BlockBytes() []byte {
|
||||
return od.bb
|
||||
}
|
||||
|
||||
func fname(prefix string, cf *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
|
||||
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, cf.Config.ConfigName, version.String(cf.Fork), slot, root)
|
||||
}
|
||||
|
||||
// this method downloads the head state, which can be used to find the correct chain config
|
||||
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
|
||||
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
|
||||
headBytes, err := client.GetState(ctx, IdHead)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
cf, err := detect.FromState(headBytes)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in remote head state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
headState, err := cf.UnmarshalBeaconState(headBytes)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
|
||||
}
|
||||
|
||||
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, cf.Config)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
|
||||
}
|
||||
|
||||
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
|
||||
return epoch, nil
|
||||
}
|
||||
|
||||
const (
|
||||
prysmMinimumVersion = "v2.0.7"
|
||||
prysmImplementationName = "Prysm"
|
||||
)
|
||||
|
||||
// ErrUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
|
||||
var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
|
||||
|
||||
// for older endpoints or clients that do not support the weak_subjectivity api method
|
||||
// we gather the necessary data for a checkpoint sync by:
|
||||
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
|
||||
// - requesting the state at the first slot of the epoch
|
||||
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
|
||||
// - requesting that block by its root
|
||||
func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
|
||||
nv, err := client.GetNodeVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
|
||||
}
|
||||
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
|
||||
return nil, errors.Wrapf(ErrUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
|
||||
}
|
||||
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
|
||||
}
|
||||
|
||||
// use first slot of the epoch for the state slot
|
||||
slot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
|
||||
}
|
||||
|
||||
log.Printf("requesting checkpoint state at slot %d", slot)
|
||||
// get the state at the first slot of the epoch
|
||||
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
|
||||
}
|
||||
|
||||
// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
|
||||
cf, err := detect.FromState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
|
||||
st, err := cf.UnmarshalBeaconState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
|
||||
}
|
||||
|
||||
// compute state and block roots
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
|
||||
}
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
header.StateRoot = stateRoot[:]
|
||||
computedBlockRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error while computing block root using state data")
|
||||
}
|
||||
|
||||
blockBytes, err := client.GetBlock(ctx, IdFromRoot(computedBlockRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by root = %d", computedBlockRoot)
|
||||
}
|
||||
block, err := cf.UnmarshalBeaconBlock(blockBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
|
||||
}
|
||||
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", st.Slot(), block.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
|
||||
log.Printf("BeaconBlock root computed from state=%#x, Block htr=%#x", computedBlockRoot, blockRoot)
|
||||
|
||||
return &OriginData{
|
||||
wsd: &WeakSubjectivityData{
|
||||
BlockRoot: blockRoot,
|
||||
StateRoot: stateRoot,
|
||||
Epoch: epoch,
|
||||
},
|
||||
st: st,
|
||||
sb: stateBytes,
|
||||
b: block,
|
||||
bb: blockBytes,
|
||||
cf: cf,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DownloadOriginData attempts to use the proposed weak_subjectivity beacon node api
|
||||
// to obtain the weak_subjectivity metadata (epoch, block_root, state_root) needed to sync
|
||||
// a beacon node from the canonical weak subjectivity checkpoint. As this is a proposed API
|
||||
// that will only be supported by prysm at first, in the event of a 404 we fallback to using a
|
||||
// different technique where we first download the head state which can be used to compute the
|
||||
// weak subjectivity epoch on the client side.
|
||||
func DownloadOriginData(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
ws, err := client.GetWeakSubjectivity(ctx)
|
||||
if err != nil {
|
||||
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
|
||||
if !errors.Is(err, ErrNotOK) {
|
||||
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
|
||||
}
|
||||
// fall back to vanilla Beacon Node API method
|
||||
return downloadBackwardsCompatible(ctx, client)
|
||||
}
|
||||
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
|
||||
|
||||
// use first slot of the epoch for the block slot
|
||||
slot, err := slots.EpochStart(ws.Epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", ws.Epoch)
|
||||
}
|
||||
log.Printf("requesting checkpoint state at slot %d", slot)
|
||||
|
||||
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
|
||||
}
|
||||
cf, err := detect.FromState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
|
||||
state, err := cf.UnmarshalBeaconState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
|
||||
}
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to compute htr for state at slot=%d", slot)
|
||||
}
|
||||
|
||||
blockRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of latest_block_header")
|
||||
}
|
||||
blockBytes, err := client.GetBlock(ctx, IdFromRoot(ws.BlockRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
|
||||
}
|
||||
block, err := cf.UnmarshalBeaconBlock(blockBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
realBlockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
|
||||
}
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", state.Slot(), block.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
|
||||
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", blockRoot, realBlockRoot)
|
||||
return &OriginData{
|
||||
wsd: ws,
|
||||
st: state,
|
||||
b: block,
|
||||
sb: stateBytes,
|
||||
bb: blockBytes,
|
||||
cf: cf,
|
||||
}, nil
|
||||
}
|
||||
407
api/client/beacon/checkpoint_test.go
Normal file
407
api/client/beacon/checkpoint_test.go
Normal file
@@ -0,0 +1,407 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
type testRT struct {
|
||||
rt func(*http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if rt.rt != nil {
|
||||
return rt.rt(req)
|
||||
}
|
||||
return nil, errors.New("RoundTripper not implemented")
|
||||
}
|
||||
|
||||
var _ http.RoundTripper = &testRT{}
|
||||
|
||||
func marshalToEnvelope(val interface{}) ([]byte, error) {
|
||||
raw, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error marshaling value to place in data envelope")
|
||||
}
|
||||
env := struct {
|
||||
Data json.RawMessage `json:"data"`
|
||||
}{
|
||||
Data: raw,
|
||||
}
|
||||
return json.Marshal(env)
|
||||
}
|
||||
|
||||
func TestMarshalToEnvelope(t *testing.T) {
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Prysm/v2.0.5 (linux amd64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
expected := `{"data":{"version":"Prysm/v2.0.5 (linux amd64)"}}`
|
||||
require.Equal(t, expected, string(encoded))
|
||||
}
|
||||
|
||||
func TestFallbackVersionCheck(t *testing.T) {
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
host: "localhost:3500",
|
||||
scheme: "http",
|
||||
}
|
||||
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getNodeVersionPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
b := bytes.NewBuffer(nil)
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Prysm/v2.0.5 (linux amd64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
b.Write(encoded)
|
||||
res.Body = io.NopCloser(b)
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusNotFound
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := DownloadOriginData(ctx, c)
|
||||
require.ErrorIs(t, err, ErrUnsupportedPrysmCheckpointVersion)
|
||||
}
|
||||
|
||||
func TestFname(t *testing.T) {
|
||||
vu := &detect.VersionedUnmarshaler{
|
||||
Config: params.MainnetConfig(),
|
||||
Fork: version.Phase0,
|
||||
}
|
||||
slot := types.Slot(23)
|
||||
prefix := "block"
|
||||
var root [32]byte
|
||||
copy(root[:], []byte{0x23, 0x23, 0x23})
|
||||
expected := "block_mainnet_phase0_23-0x2323230000000000000000000000000000000000000000000000000000000000.ssz"
|
||||
actual := fname(prefix, vu, slot, root)
|
||||
require.Equal(t, expected, actual)
|
||||
|
||||
vu.Config = params.MinimalSpecConfig()
|
||||
vu.Fork = version.Altair
|
||||
slot = 17
|
||||
prefix = "state"
|
||||
copy(root[29:], []byte{0x17, 0x17, 0x17})
|
||||
expected = "state_minimal_altair_17-0x2323230000000000000000000000000000000000000000000000000000171717.ssz"
|
||||
actual = fname(prefix, vu, slot, root)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestDownloadOriginData(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := params.MainnetConfig()
|
||||
|
||||
epoch := cfg.AltairForkEpoch - 1
|
||||
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
|
||||
wSlot, err := slots.EpochStart(epoch)
|
||||
require.NoError(t, err)
|
||||
wst, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
fork, err := forkForEpoch(cfg, epoch)
|
||||
require.NoError(t, wst.SetFork(fork))
|
||||
|
||||
// set up checkpoint block
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
|
||||
require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
|
||||
require.NoError(t, wrapper.SetProposerIndex(b, 0))
|
||||
|
||||
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
|
||||
header, err := b.Header()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
|
||||
|
||||
// order of operations can be confusing here:
|
||||
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
|
||||
// - before computing the block root (to match the request route), the block should include the state root
|
||||
// *computed from the state with a header that does not have a state root set yet*
|
||||
wRoot, err := wst.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
|
||||
serBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wsSerialized, err := wst.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
expectedWSD := WeakSubjectivityData{
|
||||
BlockRoot: bRoot,
|
||||
StateRoot: wRoot,
|
||||
Epoch: epoch,
|
||||
}
|
||||
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
cp := struct {
|
||||
Epoch string `json:"epoch"`
|
||||
Root string `json:"root"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
|
||||
Root: fmt.Sprintf("%#x", bRoot),
|
||||
}
|
||||
wsr := struct {
|
||||
Checkpoint interface{} `json:"ws_checkpoint"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}{
|
||||
Checkpoint: cp,
|
||||
StateRoot: fmt.Sprintf("%#x", wRoot),
|
||||
}
|
||||
rb, err := marshalToEnvelope(wsr)
|
||||
require.NoError(t, err)
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(rb))
|
||||
case renderGetStatePath(IdFromSlot(wSlot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
|
||||
case renderGetBlockPath(IdFromRoot(bRoot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
host: "localhost:3500",
|
||||
scheme: "http",
|
||||
}
|
||||
|
||||
od, err := DownloadOriginData(ctx, c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWSD.Epoch, od.wsd.Epoch)
|
||||
require.Equal(t, expectedWSD.StateRoot, od.wsd.StateRoot)
|
||||
require.Equal(t, expectedWSD.BlockRoot, od.wsd.BlockRoot)
|
||||
require.DeepEqual(t, wsSerialized, od.sb)
|
||||
require.DeepEqual(t, serBlock, od.bb)
|
||||
require.DeepEqual(t, wst.Fork().CurrentVersion, od.cf.Version[:])
|
||||
require.DeepEqual(t, version.Phase0, od.cf.Fork)
|
||||
}
|
||||
|
||||
// runs downloadBackwardsCompatible directly
|
||||
// and via DownloadOriginData with a round tripper that triggers the backwards compatible code path
|
||||
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := params.MainnetConfig()
|
||||
|
||||
st, expectedEpoch := defaultTestHeadState(t, cfg)
|
||||
serialized, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
|
||||
wSlot, err := slots.EpochStart(expectedEpoch)
|
||||
require.NoError(t, err)
|
||||
wst, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
|
||||
require.NoError(t, wst.SetFork(fork))
|
||||
|
||||
// set up checkpoint block
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
|
||||
require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
|
||||
require.NoError(t, wrapper.SetProposerIndex(b, 0))
|
||||
|
||||
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
|
||||
header, err := b.Header()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
|
||||
|
||||
// order of operations can be confusing here:
|
||||
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
|
||||
// - before computing the block root (to match the request route), the block should include the state root
|
||||
// *computed from the state with a header that does not have a state root set yet*
|
||||
wRoot, err := wst.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
|
||||
serBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wsSerialized, err := wst.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getNodeVersionPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
b := bytes.NewBuffer(nil)
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
b.Write(encoded)
|
||||
res.Body = io.NopCloser(b)
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusNotFound
|
||||
case renderGetStatePath(IdHead):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
|
||||
case renderGetStatePath(IdFromSlot(wSlot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
|
||||
case renderGetBlockPath(IdFromRoot(bRoot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
host: "localhost:3500",
|
||||
scheme: "http",
|
||||
}
|
||||
|
||||
odPub, err := DownloadOriginData(ctx, c)
|
||||
require.NoError(t, err)
|
||||
|
||||
odPriv, err := downloadBackwardsCompatible(ctx, c)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, odPriv.wsd, odPub.wsd)
|
||||
require.DeepEqual(t, odPriv.sb, odPub.sb)
|
||||
require.DeepEqual(t, odPriv.bb, odPub.bb)
|
||||
require.DeepEqual(t, odPriv.cf.Fork, odPub.cf.Fork)
|
||||
require.DeepEqual(t, odPriv.cf.Version, odPub.cf.Version)
|
||||
}
|
||||
|
||||
func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
|
||||
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
|
||||
serialized, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case renderGetStatePath(IdHead):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
|
||||
}
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
host: "localhost:3500",
|
||||
scheme: "http",
|
||||
}
|
||||
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedEpoch, actualEpoch)
|
||||
}
|
||||
|
||||
func forkForEpoch(cfg *params.BeaconChainConfig, epoch types.Epoch) (*ethpb.Fork, error) {
|
||||
os := forks.NewOrderedSchedule(cfg)
|
||||
currentVersion, err := os.VersionForEpoch(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevVersion, err := os.Previous(currentVersion)
|
||||
if err != nil {
|
||||
if !errors.Is(err, forks.ErrNoPreviousVersion) {
|
||||
return nil, err
|
||||
}
|
||||
// use same version for both in the case of genesis
|
||||
prevVersion = currentVersion
|
||||
}
|
||||
forkEpoch := cfg.ForkVersionSchedule[currentVersion]
|
||||
return ðpb.Fork{
|
||||
PreviousVersion: prevVersion[:],
|
||||
CurrentVersion: currentVersion[:],
|
||||
Epoch: forkEpoch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, types.Epoch) {
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
|
||||
fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetFork(fork))
|
||||
|
||||
slot, err := slots.EpochStart(cfg.AltairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(slot))
|
||||
|
||||
var validatorCount, avgBalance uint64 = 100, 35
|
||||
require.NoError(t, populateValidators(cfg, st, validatorCount, avgBalance))
|
||||
require.NoError(t, st.SetFinalizedCheckpoint(ðpb.Checkpoint{
|
||||
Epoch: fork.Epoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
// to see the math for this, look at helpers.LatestWeakSubjectivityEpoch
|
||||
// and for the values, use the mainnet config values, the validatorCount and avgBalance above, and the Altair fork epoch
|
||||
expectedEpoch := slots.ToEpoch(st.Slot()) - 224
|
||||
return st, expectedEpoch
|
||||
}
|
||||
|
||||
// TODO(10429): refactor beacon state options in testing/util to take a state.BeaconState so this can become an option
|
||||
func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, valCount, avgBalance uint64) error {
|
||||
validators := make([]*ethpb.Validator, valCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := uint64(0); i < valCount; i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, cfg.BLSPubkeyLength),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: avgBalance * 1e9,
|
||||
ExitEpoch: cfg.FarFutureEpoch,
|
||||
}
|
||||
balances[i] = validators[i].EffectiveBalance
|
||||
}
|
||||
|
||||
if err := st.SetValidators(validators); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := st.SetBalances(balances); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
api/client/beacon/client.go (new file, 437 lines)
@@ -0,0 +1,437 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
getSignedBlockPath = "/eth/v2/beacon/blocks"
|
||||
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
|
||||
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
|
||||
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
|
||||
getForkSchedulePath = "/eth/v1/config/fork_schedule"
|
||||
getStatePath = "/eth/v2/debug/beacon/states"
|
||||
getNodeVersionPath = "/eth/v1/node/version"
|
||||
)
|
||||
|
||||
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
|
||||
// StateOrBlockId constants are defined for named identifiers, and helper methods are provided
|
||||
// for slot and root identifiers. Example text from the Eth Beacon Node API documentation:
|
||||
//
|
||||
// "Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>."
|
||||
type StateOrBlockId string
|
||||
|
||||
const (
|
||||
IdFinalized StateOrBlockId = "finalized"
|
||||
IdGenesis StateOrBlockId = "genesis"
|
||||
IdHead StateOrBlockId = "head"
|
||||
IdJustified StateOrBlockId = "justified"
|
||||
)
|
||||
|
||||
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
|
||||
// a BeaconState or SignedBeaconBlock.
|
||||
func IdFromRoot(r [32]byte) StateOrBlockId {
|
||||
return StateOrBlockId(fmt.Sprintf("%#x", r))
|
||||
}
|
||||
|
||||
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
|
||||
// a BeaconState or SignedBeaconBlock.
|
||||
func IdFromSlot(s types.Slot) StateOrBlockId {
|
||||
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
|
||||
}
|
||||
|
||||
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
|
||||
func idTemplate(ts string) func(StateOrBlockId) string {
|
||||
t := template.Must(template.New("").Parse(ts))
|
||||
f := func(id StateOrBlockId) string {
|
||||
b := bytes.NewBuffer(nil)
|
||||
err := t.Execute(b, struct{ Id string }{Id: string(id)})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
// run the template to ensure that it is valid
|
||||
// this should happen at load time (using package-scoped vars) to ensure runtime errors aren't possible
|
||||
_ = f(IdGenesis)
|
||||
return f
|
||||
}
|
||||
|
||||
// ClientOpt is a functional option for the Client type (http.Client wrapper)
|
||||
type ClientOpt func(*Client)
|
||||
|
||||
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
|
||||
func WithTimeout(timeout time.Duration) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.hc.Timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
host string
|
||||
scheme string
|
||||
}
|
||||
|
||||
// NewClient constructs a new client with the provided options (ex WithTimeout).
|
||||
// `host` is the base host + port used to construct request urls. This value can be
|
||||
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
||||
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||
host, err := validHostname(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
scheme: "http",
|
||||
host: host,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func validHostname(h string) (string, error) {
|
||||
// try to parse as url (being permissive)
|
||||
u, err := url.Parse(h)
|
||||
if err == nil && u.Host != "" {
|
||||
return u.Host, nil
|
||||
}
|
||||
// try to parse as host:port
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%s:%s", host, port), nil
|
||||
}
|
||||
|
||||
func (c *Client) urlForPath(methodPath string) *url.URL {
|
||||
u := &url.URL{
|
||||
Scheme: c.scheme,
|
||||
Host: c.host,
|
||||
}
|
||||
u.Path = path.Join(u.Path, methodPath)
|
||||
return u
|
||||
}
|
||||
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
func withSSZEncoding() reqOption {
|
||||
return func(req *http.Request) {
|
||||
req.Header.Set("Accept", "application/octet-stream")
|
||||
}
|
||||
}
|
||||
|
||||
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
|
||||
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
|
||||
u := c.urlForPath(path)
|
||||
log.Printf("requesting %s", u.String())
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(req)
|
||||
}
|
||||
r, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r.Body.Close()
|
||||
}()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
return nil, non200Err(r)
|
||||
}
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func renderGetBlockPath(id StateOrBlockId) string {
|
||||
return path.Join(getSignedBlockPath, string(id))
|
||||
}
|
||||
|
||||
// GetBlock retrieves the SignedBeaconBlock for the given block id.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
|
||||
blockPath := renderGetBlockPath(blockId)
|
||||
b, err := c.get(ctx, blockPath, withSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var getBlockRootTpl = idTemplate(getBlockRootPath)
|
||||
|
||||
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
|
||||
rootPath := getBlockRootTpl(blockId)
|
||||
b, err := c.get(ctx, rootPath)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
|
||||
}
|
||||
jsonr := &struct{ Data struct{ Root string } }{}
|
||||
err = json.Unmarshal(b, jsonr)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "error decoding json data from get block root response")
|
||||
}
|
||||
rs, err := hexutil.Decode(jsonr.Data.Root)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error decoding hex-encoded value %s", jsonr.Data.Root))
|
||||
}
|
||||
return bytesutil.ToBytes32(rs), nil
|
||||
}
|
||||
|
||||
var getForkTpl = idTemplate(getForkForStatePath)
|
||||
|
||||
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
|
||||
body, err := c.get(ctx, getForkTpl(stateId))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
|
||||
}
|
||||
fr := &forkResponse{}
|
||||
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
|
||||
err = json.Unmarshal(body, dataWrapper)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error decoding json response in GetFork")
|
||||
}
|
||||
|
||||
return fr.Fork()
|
||||
}
|
||||
|
||||
// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
|
||||
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
|
||||
body, err := c.get(ctx, getForkSchedulePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting fork schedule")
|
||||
}
|
||||
fsr := &forkScheduleResponse{}
|
||||
err = json.Unmarshal(body, fsr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ofs, err := fsr.OrderedForkSchedule()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
|
||||
}
|
||||
return ofs, nil
|
||||
}
|
||||
|
||||
type NodeVersion struct {
|
||||
implementation string
|
||||
semver string
|
||||
systemInfo string
|
||||
}
|
||||
|
||||
var versionRE = regexp.MustCompile(`^(\w+)\/(v\d+\.\d+\.\d+) \((.*)\)$`)
|
||||
|
||||
func parseNodeVersion(v string) (*NodeVersion, error) {
|
||||
groups := versionRE.FindStringSubmatch(v)
|
||||
if len(groups) != 4 {
|
||||
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
|
||||
}
|
||||
return &NodeVersion{
|
||||
implementation: groups[1],
|
||||
semver: groups[2],
|
||||
systemInfo: groups[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
|
||||
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
|
||||
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
|
||||
b, err := c.get(ctx, getNodeVersionPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting node version")
|
||||
}
|
||||
d := struct {
|
||||
Data struct {
|
||||
Version string `json:"version"`
|
||||
} `json:"data"`
|
||||
}{}
|
||||
err = json.Unmarshal(b, &d)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling response body: %s", string(b))
|
||||
}
|
||||
return parseNodeVersion(d.Data.Version)
|
||||
}
|
||||
|
||||
func renderGetStatePath(id StateOrBlockId) string {
|
||||
return path.Join(getStatePath, string(id))
|
||||
}
|
||||
|
||||
// GetState retrieves the BeaconState for the given state id.
|
||||
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
|
||||
statePath := path.Join(getStatePath, string(stateId))
|
||||
b, err := c.get(ctx, statePath, withSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm
|
||||
// This api method does the following:
|
||||
// - computes weak subjectivity epoch
|
||||
// - finds the highest non-skipped block preceding the epoch
|
||||
// - returns the hash tree root (htr) of that block, along with the value of state_root from the block
|
||||
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
|
||||
body, err := c.get(ctx, getWeakSubjectivityPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v := &apimiddleware.WeakSubjectivityResponse{}
|
||||
err = json.Unmarshal(body, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epoch, err := strconv.ParseUint(v.Data.Checkpoint.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockRoot, err := hexutil.Decode(v.Data.Checkpoint.Root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateRoot, err := hexutil.Decode(v.Data.StateRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &WeakSubjectivityData{
|
||||
Epoch: types.Epoch(epoch),
|
||||
BlockRoot: bytesutil.ToBytes32(blockRoot),
|
||||
StateRoot: bytesutil.ToBytes32(stateRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
|
||||
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
|
||||
// a weak subjectivity checkpoint, or to download a BeaconState+SignedBeaconBlock pair that can be used to bootstrap
|
||||
// a new Beacon Node using Checkpoint Sync.
|
||||
type WeakSubjectivityData struct {
|
||||
BlockRoot [32]byte
|
||||
StateRoot [32]byte
|
||||
Epoch types.Epoch
|
||||
}
|
||||
|
||||
func non200Err(response *http.Response) error {
|
||||
bodyBytes, err := ioutil.ReadAll(response.Body)
|
||||
var body string
|
||||
if err != nil {
|
||||
body = "(Unable to read response body.)"
|
||||
} else {
|
||||
body = "response body:\n" + string(bodyBytes)
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case 404:
|
||||
return errors.Wrap(ErrNotFound, msg)
|
||||
default:
|
||||
return errors.Wrap(ErrNotOK, msg)
|
||||
}
|
||||
}
|
||||
|
||||
type forkResponse struct {
|
||||
PreviousVersion string `json:"previous_version"`
|
||||
CurrentVersion string `json:"current_version"`
|
||||
Epoch string `json:"epoch"`
|
||||
}
|
||||
|
||||
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
|
||||
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cSlice, err := hexutil.Decode(f.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
|
||||
}
|
||||
pSlice, err := hexutil.Decode(f.PreviousVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
|
||||
}
|
||||
return ðpb.Fork{
|
||||
CurrentVersion: cSlice,
|
||||
PreviousVersion: pSlice,
|
||||
Epoch: types.Epoch(epoch),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type forkScheduleResponse struct {
|
||||
Data []forkResponse
|
||||
}
|
||||
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
|
||||
ofs := make(forks.OrderedSchedule, 0)
|
||||
for _, d := range fsr.Data {
|
||||
epoch, err := strconv.Atoi(d.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vSlice, err := hexutil.Decode(d.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(vSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
|
||||
}
|
||||
version := bytesutil.ToBytes4(vSlice)
|
||||
ofs = append(ofs, forks.ForkScheduleEntry{
|
||||
Version: version,
|
||||
Epoch: types.Epoch(uint64(epoch)),
|
||||
})
|
||||
}
|
||||
sort.Sort(ofs)
|
||||
return ofs, nil
|
||||
}
|
||||
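For orientation, a minimal usage sketch of the client added above; it is not part of the diff, and the host, timeout, and slot values are placeholders:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/prysmaticlabs/prysm/api/client/beacon"
)

func main() {
    ctx := context.Background()
    // "localhost:3500" is a placeholder; NewClient also accepts a full URL string.
    c, err := beacon.NewClient("localhost:3500", beacon.WithTimeout(2*time.Minute))
    if err != nil {
        log.Fatal(err)
    }
    // Resolve the canonical head block root via the exported IdHead identifier.
    root, err := c.GetBlockRoot(ctx, beacon.IdHead)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("head block root: %#x\n", root[:])
    // Fetch ssz-encoded state bytes for an arbitrary (hypothetical) slot identifier.
    stateBytes, err := c.GetState(ctx, beacon.IdFromSlot(100))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("state size: %d bytes\n", len(stateBytes))
}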
api/client/beacon/client_test.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package beacon

import (
    "testing"

    "github.com/prysmaticlabs/prysm/testing/require"
)

func TestParseNodeVersion(t *testing.T) {
    cases := []struct {
        name string
        v    string
        err  error
        nv   *NodeVersion
    }{
        {
            name: "empty string",
            v:    "",
            err:  ErrInvalidNodeVersion,
        },
        {
            name: "Prysm as the version string",
            v:    "Prysm",
            err:  ErrInvalidNodeVersion,
        },
        {
            name: "semver only",
            v:    "v2.0.6",
            err:  ErrInvalidNodeVersion,
        },
        {
            name: "implementation and semver only",
            v:    "Prysm/v2.0.6",
            err:  ErrInvalidNodeVersion,
        },
        {
            name: "complete version",
            v:    "Prysm/v2.0.6 (linux amd64)",
            nv: &NodeVersion{
                implementation: "Prysm",
                semver:         "v2.0.6",
                systemInfo:     "linux amd64",
            },
        },
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            nv, err := parseNodeVersion(c.v)
            if c.err != nil {
                require.ErrorIs(t, err, c.err)
            } else {
                require.NoError(t, err)
                require.DeepEqual(t, c.nv, nv)
            }
        })
    }
}
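As a side note, the version-string format exercised by the test above can also be checked directly against the same regular expression; a small standalone sketch (the sample strings are hypothetical):

package main

import (
    "fmt"
    "regexp"
)

// Mirrors the versionRE pattern from client.go: implementation/vX.Y.Z (system info).
var versionRE = regexp.MustCompile(`^(\w+)\/(v\d+\.\d+\.\d+) \((.*)\)$`)

func main() {
    for _, v := range []string{"Prysm/v2.0.6 (linux amd64)", "Prysm/v2.0.6", "v2.0.6"} {
        groups := versionRE.FindStringSubmatch(v)
        if len(groups) != 4 {
            fmt.Printf("%-30s -> invalid\n", v)
            continue
        }
        fmt.Printf("%-30s -> impl=%s semver=%s system=%s\n", v, groups[1], groups[2], groups[3])
    }
}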
api/client/beacon/doc.go (new file, 6 lines)
@@ -0,0 +1,6 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/

*/
package beacon
api/client/beacon/errors.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package beacon

import "github.com/pkg/errors"

// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")

// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")

// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
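A hypothetical caller-side sketch showing how these sentinel errors might be distinguished with errors.Is; the helper name and handling are assumptions, not part of the diff:

package beaconexample

import (
    "context"
    "errors"
    "fmt"

    "github.com/prysmaticlabs/prysm/api/client/beacon"
)

// fetchFinalizedState is a hypothetical helper branching on the sentinel errors above.
func fetchFinalizedState(ctx context.Context, c *beacon.Client) ([]byte, error) {
    b, err := c.GetState(ctx, beacon.IdFinalized)
    if errors.Is(err, beacon.ErrNotFound) {
        // The node responded 404; non200Err wraps ErrNotFound, which itself wraps ErrNotOK.
        return nil, fmt.Errorf("finalized state not available yet: %w", err)
    }
    if errors.Is(err, beacon.ErrNotOK) {
        // Any other non-2xx response.
        return nil, fmt.Errorf("beacon API request failed: %w", err)
    }
    return b, err
}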
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"gateway.go",
|
||||
"log.go",
|
||||
"options.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/api/gateway",
|
||||
visibility = [
|
||||
|
||||
@@ -3,6 +3,7 @@ package apimiddleware
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
@@ -14,6 +15,8 @@ import (
|
||||
type ApiProxyMiddleware struct {
|
||||
GatewayAddress string
|
||||
EndpointCreator EndpointFactory
|
||||
Timeout time.Duration
|
||||
router *mux.Router
|
||||
}
|
||||
|
||||
// EndpointFactory is responsible for creating new instances of Endpoint values.
|
||||
@@ -29,6 +32,8 @@ type Endpoint struct {
|
||||
GetResponse interface{} // The struct corresponding to the JSON structure used in a GET response.
|
||||
PostRequest interface{} // The struct corresponding to the JSON structure used in a POST request.
|
||||
PostResponse interface{} // The struct corresponding to the JSON structure used in a POST response.
|
||||
DeleteRequest interface{} // The struct corresponding to the JSON structure used in a DELETE request.
|
||||
DeleteResponse interface{} // The struct corresponding to the JSON structure used in a DELETE response.
|
||||
RequestURLLiterals []string // Names of URL parameters that should not be base64-encoded.
|
||||
RequestQueryParams []QueryParam // Query parameters of the request.
|
||||
Err ErrorJson // The struct corresponding to the error that should be returned in case of a request failure.
|
||||
@@ -74,16 +79,23 @@ type fieldProcessor struct {
|
||||
// Run starts the proxy, registering all proxy endpoints.
|
||||
func (m *ApiProxyMiddleware) Run(gatewayRouter *mux.Router) {
|
||||
for _, path := range m.EndpointCreator.Paths() {
|
||||
m.handleApiPath(gatewayRouter, path, m.EndpointCreator)
|
||||
gatewayRouter.HandleFunc(path, m.WithMiddleware(path))
|
||||
}
|
||||
m.router = gatewayRouter
|
||||
}
|
||||
|
||||
func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path string, endpointFactory EndpointFactory) {
|
||||
gatewayRouter.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
|
||||
endpoint, err := endpointFactory.Create(path)
|
||||
// ServeHTTP for the proxy middleware.
|
||||
func (m *ApiProxyMiddleware) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
m.router.ServeHTTP(w, req)
|
||||
}
|
||||
|
||||
// WithMiddleware wraps the given endpoint handler with the middleware logic.
|
||||
func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
endpoint, err := m.EndpointCreator.Create(path)
|
||||
if err != nil {
|
||||
errJson := InternalServerErrorWithMessage(err, "could not create endpoint")
|
||||
WriteError(w, errJson, nil)
|
||||
log.WithError(err).Errorf("Could not create endpoint for path: %s", path)
|
||||
return
|
||||
}
|
||||
|
||||
for _, handler := range endpoint.CustomHandlers {
|
||||
@@ -93,16 +105,14 @@ func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path strin
|
||||
}
|
||||
|
||||
if req.Method == "POST" {
|
||||
if errJson := deserializeRequestBodyIntoContainerWrapped(endpoint, req, w); errJson != nil {
|
||||
if errJson := handlePostRequestForEndpoint(endpoint, w, req); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
if errJson := SetRequestBodyToRequestContainer(endpoint.PostRequest, req); errJson != nil {
|
||||
if req.Method == "DELETE" {
|
||||
if errJson := handleDeleteRequestForEndpoint(endpoint, req); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
@@ -112,7 +122,7 @@ func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path strin
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
grpcResp, errJson := ProxyRequest(req)
|
||||
grpcResp, errJson := m.ProxyRequest(req)
|
||||
if errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
@@ -137,6 +147,8 @@ func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path strin
|
||||
var resp interface{}
|
||||
if req.Method == "GET" {
|
||||
resp = endpoint.GetResponse
|
||||
} else if req.Method == "DELETE" {
|
||||
resp = endpoint.DeleteResponse
|
||||
} else {
|
||||
resp = endpoint.PostResponse
|
||||
}
|
||||
@@ -164,7 +176,27 @@ func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path strin
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func handlePostRequestForEndpoint(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) ErrorJson {
|
||||
if errJson := deserializeRequestBodyIntoContainerWrapped(endpoint, req, w); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
return SetRequestBodyToRequestContainer(endpoint.PostRequest, req)
|
||||
}
|
||||
|
||||
func handleDeleteRequestForEndpoint(endpoint *Endpoint, req *http.Request) ErrorJson {
|
||||
if errJson := DeserializeRequestBodyIntoContainer(req.Body, endpoint.DeleteRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := ProcessRequestContainerFields(endpoint.DeleteRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
return SetRequestBodyToRequestContainer(endpoint.DeleteRequest, req)
|
||||
}
|
||||
|
||||
func deserializeRequestBodyIntoContainerWrapped(endpoint *Endpoint, req *http.Request, w http.ResponseWriter) ErrorJson {
|
||||
|
||||
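To illustrate the refactored middleware wiring above (Run registering per-path handlers and ServeHTTP delegating to the router), a small sketch; the addresses and factory argument are placeholders, not part of the diff:

package middlewareexample

import (
    "net/http"
    "time"

    "github.com/gorilla/mux"
    "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)

// serveProxy shows the wiring: Run registers a WithMiddleware handler for every
// path returned by the factory, and the proxy's ServeHTTP delegates to that router.
func serveProxy(factory apimiddleware.EndpointFactory) error {
    proxy := &apimiddleware.ApiProxyMiddleware{
        GatewayAddress:  "127.0.0.1:3500", // placeholder grpc-gateway address
        EndpointCreator: factory,
        Timeout:         2 * time.Minute, // used by ProxyRequest's http.Client
    }
    proxy.Run(mux.NewRouter())
    return http.ListenAndServe("127.0.0.1:4500", proxy) // placeholder listen address
}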
@@ -5,10 +5,10 @@ import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/api/grpc"
|
||||
@@ -75,11 +75,14 @@ func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *h
|
||||
}
|
||||
|
||||
// ProxyRequest proxies the request to grpc-gateway.
|
||||
func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
|
||||
func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
|
||||
// We do not use http.DefaultClient because it does not have any timeout.
|
||||
netClient := &http.Client{Timeout: time.Minute * 2}
|
||||
netClient := &http.Client{Timeout: m.Timeout}
|
||||
grpcResp, err := netClient.Do(req)
|
||||
if err != nil {
|
||||
if err, ok := err.(net.Error); ok && err.Timeout() {
|
||||
return nil, TimeoutError()
|
||||
}
|
||||
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
|
||||
}
|
||||
if grpcResp == nil {
|
||||
@@ -111,9 +114,14 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
// Set code to HTTP code because unmarshalled body contained gRPC code.
|
||||
errJson.SetCode(resp.StatusCode)
|
||||
WriteError(w, errJson, resp.Header)
|
||||
// Handle gRPC timeout.
|
||||
if resp.StatusCode == http.StatusGatewayTimeout {
|
||||
WriteError(w, TimeoutError(), resp.Header)
|
||||
} else {
|
||||
// Set code to HTTP code because unmarshalled body contained gRPC code.
|
||||
errJson.SetCode(resp.StatusCode)
|
||||
WriteError(w, errJson, resp.Header)
|
||||
}
|
||||
}
|
||||
return responseHasError, nil
|
||||
}
|
||||
|
||||
@@ -41,6 +41,13 @@ func InternalServerError(err error) *DefaultErrorJson {
|
||||
}
|
||||
}
|
||||
|
||||
func TimeoutError() *DefaultErrorJson {
|
||||
return &DefaultErrorJson{
|
||||
Message: "Request timeout",
|
||||
Code: http.StatusRequestTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// StatusCode returns the error's underlying error code.
|
||||
func (e *DefaultErrorJson) StatusCode() int {
|
||||
return e.Code
|
||||
|
||||
@@ -34,74 +34,52 @@ type PbMux struct {
|
||||
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
|
||||
|
||||
// MuxHandler is a function that implements the mux handler functionality.
|
||||
type MuxHandler func(http.Handler, http.ResponseWriter, *http.Request)
|
||||
type MuxHandler func(
|
||||
apiMiddlewareHandler *apimiddleware.ApiProxyMiddleware,
|
||||
h http.HandlerFunc,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
)
|
||||
|
||||
// Config parameters for setting up the gateway service.
|
||||
type config struct {
|
||||
maxCallRecvMsgSize uint64
|
||||
remoteCert string
|
||||
gatewayAddr string
|
||||
remoteAddr string
|
||||
allowedOrigins []string
|
||||
apiMiddlewareEndpointFactory apimiddleware.EndpointFactory
|
||||
muxHandler MuxHandler
|
||||
pbHandlers []*PbMux
|
||||
router *mux.Router
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
|
||||
type Gateway struct {
|
||||
conn *grpc.ClientConn
|
||||
pbHandlers []*PbMux
|
||||
muxHandler MuxHandler
|
||||
maxCallRecvMsgSize uint64
|
||||
router *mux.Router
|
||||
server *http.Server
|
||||
cancel context.CancelFunc
|
||||
remoteCert string
|
||||
gatewayAddr string
|
||||
apiMiddlewareEndpointFactory apimiddleware.EndpointFactory
|
||||
ctx context.Context
|
||||
startFailure error
|
||||
remoteAddr string
|
||||
allowedOrigins []string
|
||||
cfg *config
|
||||
conn *grpc.ClientConn
|
||||
server *http.Server
|
||||
cancel context.CancelFunc
|
||||
proxy *apimiddleware.ApiProxyMiddleware
|
||||
ctx context.Context
|
||||
startFailure error
|
||||
}
|
||||
|
||||
// New returns a new instance of the Gateway.
|
||||
func New(
|
||||
ctx context.Context,
|
||||
pbHandlers []*PbMux,
|
||||
muxHandler MuxHandler,
|
||||
remoteAddr,
|
||||
gatewayAddress string,
|
||||
) *Gateway {
|
||||
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
|
||||
g := &Gateway{
|
||||
pbHandlers: pbHandlers,
|
||||
muxHandler: muxHandler,
|
||||
router: mux.NewRouter(),
|
||||
gatewayAddr: gatewayAddress,
|
||||
ctx: ctx,
|
||||
remoteAddr: remoteAddr,
|
||||
allowedOrigins: []string{},
|
||||
ctx: ctx,
|
||||
cfg: &config{
|
||||
router: mux.NewRouter(),
|
||||
},
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// WithRouter allows adding a custom mux router to the gateway.
|
||||
func (g *Gateway) WithRouter(r *mux.Router) *Gateway {
|
||||
g.router = r
|
||||
return g
|
||||
}
|
||||
|
||||
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
|
||||
func (g *Gateway) WithAllowedOrigins(origins []string) *Gateway {
|
||||
g.allowedOrigins = origins
|
||||
return g
|
||||
}
|
||||
|
||||
// WithRemoteCert allows adding a custom certificate to the gateway,
|
||||
func (g *Gateway) WithRemoteCert(cert string) *Gateway {
|
||||
g.remoteCert = cert
|
||||
return g
|
||||
}
|
||||
|
||||
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
|
||||
func (g *Gateway) WithMaxCallRecvMsgSize(size uint64) *Gateway {
|
||||
g.maxCallRecvMsgSize = size
|
||||
return g
|
||||
}
|
||||
|
||||
// WithApiMiddleware allows adding API Middleware proxy to the gateway.
|
||||
func (g *Gateway) WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) *Gateway {
|
||||
g.apiMiddlewareEndpointFactory = endpointFactory
|
||||
return g
|
||||
for _, opt := range opts {
|
||||
if err := opt(g); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Start the gateway service.
|
||||
@@ -109,7 +87,7 @@ func (g *Gateway) Start() {
|
||||
ctx, cancel := context.WithCancel(g.ctx)
|
||||
g.cancel = cancel
|
||||
|
||||
conn, err := g.dial(ctx, "tcp", g.remoteAddr)
|
||||
conn, err := g.dial(ctx, "tcp", g.cfg.remoteAddr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to connect to gRPC server")
|
||||
g.startFailure = err
|
||||
@@ -117,7 +95,7 @@ func (g *Gateway) Start() {
|
||||
}
|
||||
g.conn = conn
|
||||
|
||||
for _, h := range g.pbHandlers {
|
||||
for _, h := range g.cfg.pbHandlers {
|
||||
for _, r := range h.Registrations {
|
||||
if err := r(ctx, h.Mux, g.conn); err != nil {
|
||||
log.WithError(err).Error("Failed to register handler")
|
||||
@@ -126,29 +104,29 @@ func (g *Gateway) Start() {
|
||||
}
|
||||
}
|
||||
for _, p := range h.Patterns {
|
||||
g.router.PathPrefix(p).Handler(h.Mux)
|
||||
g.cfg.router.PathPrefix(p).Handler(h.Mux)
|
||||
}
|
||||
}
|
||||
|
||||
corsMux := g.corsMiddleware(g.router)
|
||||
corsMux := g.corsMiddleware(g.cfg.router)
|
||||
|
||||
if g.muxHandler != nil {
|
||||
g.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
g.muxHandler(corsMux, w, r)
|
||||
if g.cfg.apiMiddlewareEndpointFactory != nil && !g.cfg.apiMiddlewareEndpointFactory.IsNil() {
|
||||
g.registerApiMiddleware()
|
||||
}
|
||||
|
||||
if g.cfg.muxHandler != nil {
|
||||
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
g.cfg.muxHandler(g.proxy, corsMux.ServeHTTP, w, r)
|
||||
})
|
||||
}
|
||||
|
||||
if g.apiMiddlewareEndpointFactory != nil && !g.apiMiddlewareEndpointFactory.IsNil() {
|
||||
go g.registerApiMiddleware()
|
||||
}
|
||||
|
||||
g.server = &http.Server{
|
||||
Addr: g.gatewayAddr,
|
||||
Handler: g.router,
|
||||
Addr: g.cfg.gatewayAddr,
|
||||
Handler: corsMux,
|
||||
}
|
||||
|
||||
go func() {
|
||||
log.WithField("address", g.gatewayAddr).Info("Starting gRPC gateway")
|
||||
log.WithField("address", g.cfg.gatewayAddr).Info("Starting gRPC gateway")
|
||||
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
|
||||
log.WithError(err).Error("Failed to start gRPC gateway")
|
||||
g.startFailure = err
|
||||
@@ -162,11 +140,9 @@ func (g *Gateway) Status() error {
|
||||
if g.startFailure != nil {
|
||||
return g.startFailure
|
||||
}
|
||||
|
||||
if s := g.conn.GetState(); s != connectivity.Ready {
|
||||
return fmt.Errorf("grpc server is %s", s)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -183,18 +159,16 @@ func (g *Gateway) Stop() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if g.cancel != nil {
|
||||
g.cancel()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: g.allowedOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodOptions},
|
||||
AllowedOrigins: g.cfg.allowedOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
@@ -236,8 +210,8 @@ func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientC
|
||||
// "addr" must be a valid TCP address with a port number.
|
||||
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
|
||||
security := grpc.WithInsecure()
|
||||
if len(g.remoteCert) > 0 {
|
||||
creds, err := credentials.NewClientTLSFromFile(g.remoteCert, "")
|
||||
if len(g.cfg.remoteCert) > 0 {
|
||||
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -245,7 +219,7 @@ func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, e
|
||||
}
|
||||
opts := []grpc.DialOption{
|
||||
security,
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
|
||||
}
|
||||
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
@@ -266,16 +240,17 @@ func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn,
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithContextDialer(f),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
}
|
||||
|
||||
func (g *Gateway) registerApiMiddleware() {
|
||||
proxy := &apimiddleware.ApiProxyMiddleware{
|
||||
GatewayAddress: g.gatewayAddr,
|
||||
EndpointCreator: g.apiMiddlewareEndpointFactory,
|
||||
g.proxy = &apimiddleware.ApiProxyMiddleware{
|
||||
GatewayAddress: g.cfg.gatewayAddr,
|
||||
EndpointCreator: g.cfg.apiMiddlewareEndpointFactory,
|
||||
Timeout: g.cfg.timeout,
|
||||
}
|
||||
log.Info("Starting API middleware")
|
||||
proxy.Run(g.router)
|
||||
g.proxy.Run(g.cfg.router)
|
||||
}
|
||||
|
||||
@@ -40,26 +40,30 @@ func TestGateway_Customized(t *testing.T) {
|
||||
size := uint64(100)
|
||||
endpointFactory := &mockEndpointFactory{}
|
||||
|
||||
g := New(
|
||||
context.Background(),
|
||||
[]*PbMux{},
|
||||
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
|
||||
opts := []Option{
|
||||
WithRouter(r),
|
||||
WithRemoteCert(cert),
|
||||
WithAllowedOrigins(origins),
|
||||
WithMaxCallRecvMsgSize(size),
|
||||
WithApiMiddleware(endpointFactory),
|
||||
WithMuxHandler(func(
|
||||
_ *apimiddleware.ApiProxyMiddleware,
|
||||
_ http.HandlerFunc,
|
||||
_ http.ResponseWriter,
|
||||
_ *http.Request,
|
||||
) {
|
||||
}),
|
||||
}
|
||||
|
||||
},
|
||||
"",
|
||||
"",
|
||||
).WithRouter(r).
|
||||
WithRemoteCert(cert).
|
||||
WithAllowedOrigins(origins).
|
||||
WithMaxCallRecvMsgSize(size).
|
||||
WithApiMiddleware(endpointFactory)
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, r, g.router)
|
||||
assert.Equal(t, cert, g.remoteCert)
|
||||
require.Equal(t, 1, len(g.allowedOrigins))
|
||||
assert.Equal(t, origins[0], g.allowedOrigins[0])
|
||||
assert.Equal(t, size, g.maxCallRecvMsgSize)
|
||||
assert.Equal(t, endpointFactory, g.apiMiddlewareEndpointFactory)
|
||||
assert.Equal(t, r, g.cfg.router)
|
||||
assert.Equal(t, cert, g.cfg.remoteCert)
|
||||
require.Equal(t, 1, len(g.cfg.allowedOrigins))
|
||||
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
|
||||
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
|
||||
assert.Equal(t, endpointFactory, g.cfg.apiMiddlewareEndpointFactory)
|
||||
}
|
||||
|
||||
func TestGateway_StartStop(t *testing.T) {
|
||||
@@ -75,23 +79,27 @@ func TestGateway_StartStop(t *testing.T) {
|
||||
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
|
||||
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
|
||||
|
||||
g := New(
|
||||
ctx.Context,
|
||||
[]*PbMux{},
|
||||
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
|
||||
opts := []Option{
|
||||
WithGatewayAddr(gatewayAddress),
|
||||
WithRemoteAddr(selfAddress),
|
||||
WithMuxHandler(func(
|
||||
_ *apimiddleware.ApiProxyMiddleware,
|
||||
_ http.HandlerFunc,
|
||||
_ http.ResponseWriter,
|
||||
_ *http.Request,
|
||||
) {
|
||||
}),
|
||||
}
|
||||
|
||||
},
|
||||
selfAddress,
|
||||
gatewayAddress,
|
||||
)
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
g.Start()
|
||||
go func() {
|
||||
require.LogsContain(t, hook, "Starting gRPC gateway")
|
||||
require.LogsDoNotContain(t, hook, "Starting API middleware")
|
||||
}()
|
||||
|
||||
err := g.Stop()
|
||||
err = g.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -106,15 +114,15 @@ func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
|
||||
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
|
||||
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
|
||||
|
||||
g := New(
|
||||
ctx.Context,
|
||||
[]*PbMux{},
|
||||
/* muxHandler */ nil,
|
||||
selfAddress,
|
||||
gatewayAddress,
|
||||
)
|
||||
opts := []Option{
|
||||
WithGatewayAddr(gatewayAddress),
|
||||
WithRemoteAddr(selfAddress),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
writer := httptest.NewRecorder()
|
||||
g.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
|
||||
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
}
|
||||
|
||||
api/gateway/options.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
|
||||
)
|
||||
|
||||
type Option func(g *Gateway) error
|
||||
|
||||
func (g *Gateway) SetRouter(r *mux.Router) *Gateway {
|
||||
g.cfg.router = r
|
||||
return g
|
||||
}
|
||||
|
||||
func WithPbHandlers(handlers []*PbMux) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.pbHandlers = handlers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithMuxHandler(m MuxHandler) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.muxHandler = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithGatewayAddr(addr string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.gatewayAddr = addr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithRemoteAddr(addr string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.remoteAddr = addr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRouter allows adding a custom mux router to the gateway.
|
||||
func WithRouter(r *mux.Router) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.router = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
|
||||
func WithAllowedOrigins(origins []string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.allowedOrigins = origins
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRemoteCert allows adding a custom certificate to the gateway.
|
||||
func WithRemoteCert(cert string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.remoteCert = cert
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
|
||||
func WithMaxCallRecvMsgSize(size uint64) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.maxCallRecvMsgSize = size
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithApiMiddleware allows adding an API middleware proxy to the gateway.
|
||||
func WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.apiMiddlewareEndpointFactory = endpointFactory
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout allows changing the timeout value for API calls.
|
||||
func WithTimeout(seconds uint64) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.timeout = time.Second * time.Duration(seconds)
|
||||
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
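A sketch of how a gateway might be assembled with the functional options defined above; the addresses and option values are placeholders, not taken from the diff:

package main

import (
    "context"
    "log"

    "github.com/prysmaticlabs/prysm/api/gateway"
)

func main() {
    opts := []gateway.Option{
        gateway.WithGatewayAddr("127.0.0.1:3500"), // placeholder HTTP listen address
        gateway.WithRemoteAddr("127.0.0.1:4000"),  // placeholder gRPC server address
        gateway.WithMaxCallRecvMsgSize(1 << 22),
        gateway.WithAllowedOrigins([]string{"*"}),
        gateway.WithTimeout(120), // seconds; also sets gwruntime.DefaultContextTimeout
    }
    g, err := gateway.New(context.Background(), opts...)
    if err != nil {
        log.Fatal(err)
    }
    g.Start()
    if err := g.Status(); err != nil {
        log.Println(err)
    }
    if err := g.Stop(); err != nil {
        log.Println(err)
    }
}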
@@ -110,9 +110,8 @@ func TestLockUnlock_CleansUnused(t *testing.T) {
|
||||
lock := NewMultilock("dog", "cat", "owl")
|
||||
lock.Lock()
|
||||
assert.Equal(t, 3, len(locks.list))
|
||||
defer lock.Unlock()
|
||||
lock.Unlock()
|
||||
|
||||
<-time.After(100 * time.Millisecond)
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
@@ -4,13 +4,16 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"chain_info.go",
|
||||
"error.go",
|
||||
"head.go",
|
||||
"head_sync_committee_info.go",
|
||||
"info.go",
|
||||
"init_sync_process_block.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"new_slot.go",
|
||||
"optimistic_sync.go",
|
||||
"options.go",
|
||||
"pow_block.go",
|
||||
"process_attestation.go",
|
||||
"process_attestation_helpers.go",
|
||||
"process_block.go",
|
||||
@@ -18,21 +21,24 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_block.go",
|
||||
"service.go",
|
||||
"state_balance_cache.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/beacon-chain:__subpackages__",
|
||||
"//testing/fuzz:__pkg__",
|
||||
"//testing/slasher/simulator:__pkg__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/store:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
@@ -43,20 +49,25 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
@@ -64,7 +75,9 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_emicklei_dot//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -91,10 +104,13 @@ go_test(
|
||||
"checktags_test.go",
|
||||
"head_sync_committee_info_test.go",
|
||||
"head_test.go",
|
||||
"info_test.go",
|
||||
"init_test.go",
|
||||
"log_test.go",
|
||||
"metrics_test.go",
|
||||
"mock_engine_test.go",
|
||||
"mock_test.go",
|
||||
"optimistic_sync_test.go",
|
||||
"pow_block_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"receive_attestation_test.go",
|
||||
@@ -117,6 +133,7 @@ go_test(
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -141,6 +158,7 @@ go_test(
|
||||
"chain_info_norace_test.go",
|
||||
"checktags_test.go",
|
||||
"init_test.go",
|
||||
"mock_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_norace_test.go",
|
||||
],
|
||||
|
||||
@@ -6,12 +6,16 @@ import (
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -35,7 +39,7 @@ type TimeFetcher interface {
|
||||
|
||||
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
|
||||
type GenesisFetcher interface {
|
||||
GenesisValidatorRoot() [32]byte
|
||||
GenesisValidatorsRoot() [32]byte
|
||||
}
|
||||
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -47,14 +51,16 @@ type HeadFetcher interface {
|
||||
HeadState(ctx context.Context) (state.BeaconState, error)
|
||||
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
|
||||
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
|
||||
HeadGenesisValidatorRoot() [32]byte
|
||||
HeadGenesisValidatorsRoot() [32]byte
|
||||
HeadETH1Data() *ethpb.Eth1Data
|
||||
HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
|
||||
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
|
||||
ProtoArrayStore() *protoarray.Store
|
||||
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
|
||||
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
|
||||
ChainHeads() ([][32]byte, []types.Slot)
|
||||
IsOptimistic(ctx context.Context) (bool, error)
|
||||
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
|
||||
HeadSyncCommitteeFetcher
|
||||
HeadDomainFetcher
|
||||
ForkChoicer() forkchoice.ForkChoicer
|
||||
}
|
||||
|
||||
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
|
||||
@@ -76,31 +82,45 @@ type FinalizationFetcher interface {
|
||||
PreviousJustifiedCheckpt() *ethpb.Checkpoint
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
|
||||
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
if s.finalizedCheckpt == nil {
|
||||
cp := s.store.FinalizedCheckpt()
|
||||
if cp == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return ethpb.CopyCheckpoint(s.finalizedCheckpt)
|
||||
return ethpb.CopyCheckpoint(cp)
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
|
||||
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
|
||||
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
if s.justifiedCheckpt == nil {
|
||||
cp := s.store.JustifiedCheckpt()
|
||||
if cp == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return ethpb.CopyCheckpoint(s.justifiedCheckpt)
|
||||
return ethpb.CopyCheckpoint(cp)
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
|
||||
// PreviousJustifiedCheckpt returns the previous justified checkpoint from chain store.
|
||||
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
if s.prevJustifiedCheckpt == nil {
|
||||
cp := s.store.PrevJustifiedCheckpt()
|
||||
if cp == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return ethpb.CopyCheckpoint(s.prevJustifiedCheckpt)
|
||||
return ethpb.CopyCheckpoint(cp)
|
||||
}
|
||||
|
||||
// BestJustifiedCheckpt returns the best justified checkpoint from store.
|
||||
func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
cp := s.store.BestJustifiedCheckpt()
|
||||
// If there is no best justified checkpoint, return the checkpoint with root as zeros to be used for genesis cases.
|
||||
if cp == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return ethpb.CopyCheckpoint(cp)
|
||||
}
|
||||
|
||||
// HeadSlot returns the slot of the head of the chain.
|
||||
@@ -197,8 +217,8 @@ func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, er
|
||||
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
|
||||
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
|
||||
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
|
||||
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -206,7 +226,7 @@ func (s *Service) HeadGenesisValidatorRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
return s.headGenesisValidatorRoot()
|
||||
return s.headGenesisValidatorsRoot()
|
||||
}
|
||||
|
||||
// HeadETH1Data returns the eth1data of the current head state.
|
||||
@@ -220,26 +240,21 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
|
||||
return s.head.state.Eth1Data()
|
||||
}
|
||||
|
||||
// ProtoArrayStore returns the proto array store object.
|
||||
func (s *Service) ProtoArrayStore() *protoarray.Store {
|
||||
return s.cfg.ForkChoiceStore.Store()
|
||||
}
|
||||
|
||||
// GenesisTime returns the genesis time of beacon chain.
|
||||
func (s *Service) GenesisTime() time.Time {
|
||||
return s.genesisTime
|
||||
}
|
||||
|
||||
// GenesisValidatorRoot returns the genesis validator
|
||||
// GenesisValidatorsRoot returns the genesis validator
|
||||
// root of the chain.
|
||||
func (s *Service) GenesisValidatorRoot() [32]byte {
|
||||
func (s *Service) GenesisValidatorsRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return [32]byte{}
|
||||
}
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
@@ -270,45 +285,92 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
|
||||
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
|
||||
// Heads roots and heads slots are returned.
|
||||
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
|
||||
nodes := s.ProtoArrayStore().Nodes()
|
||||
|
||||
// Deliberate choice not to preallocate space below.
// Heads can't be more than 2-3 in the worst case, whereas pre-allocation would start at 64.
|
||||
headsRoots := make([][32]byte, 0)
|
||||
headsSlots := make([]types.Slot, 0)
|
||||
|
||||
nonExistentNode := ^uint64(0)
|
||||
for _, node := range nodes {
|
||||
// Possible heads have no children.
|
||||
if node.BestDescendant() == nonExistentNode && node.BestChild() == nonExistentNode {
|
||||
headsRoots = append(headsRoots, node.Root())
|
||||
headsSlots = append(headsSlots, node.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
return headsRoots, headsSlots
|
||||
return s.cfg.ForkChoiceStore.Tips()
|
||||
}
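ChainHeads now simply delegates to the fork-choice store's Tips(), which returns the head roots and their slots as two parallel slices. A hedged sketch of iterating them together (the log fields are illustrative and assume the package's existing log and fmt imports):

roots, headSlots := s.ChainHeads()
for i, root := range roots {
// Every leaf of the fork choice tree is a possible chain head.
log.WithField("root", fmt.Sprintf("%#x", root[:])).WithField("slot", headSlots[i]).Debug("Possible chain head")
}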
|
||||
|
||||
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
|
||||
func (s *Service) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
|
||||
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
if !s.hasHeadState() {
|
||||
return 0, false
|
||||
}
|
||||
return s.headState(ctx).ValidatorIndexByPubkey(pubKey)
|
||||
return s.headValidatorIndexAtPubkey(pubKey)
|
||||
}
|
||||
|
||||
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
|
||||
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.ValidatorIndex) ([48]byte, error) {
|
||||
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
if !s.hasHeadState() {
|
||||
return [48]byte{}, nil
|
||||
return [fieldparams.BLSPubkeyLength]byte{}, nil
|
||||
}
|
||||
v, err := s.headValidatorAtIndex(index)
|
||||
if err != nil {
|
||||
return [48]byte{}, err
|
||||
return [fieldparams.BLSPubkeyLength]byte{}, err
|
||||
}
|
||||
return v.PublicKey(), nil
|
||||
}
|
||||
|
||||
// ForkChoicer returns the forkchoice interface
|
||||
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
|
||||
return s.cfg.ForkChoiceStore
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if the current head is optimistic.
|
||||
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return s.IsOptimisticForRoot(ctx, s.head.root)
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot takes a block root as argument instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
|
||||
if err == nil {
|
||||
return optimistic, nil
|
||||
}
|
||||
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
|
||||
return false, err
|
||||
}
|
||||
ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ss == nil {
|
||||
return false, errInvalidNilSummary
|
||||
}
|
||||
|
||||
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if slots.ToEpoch(ss.Slot) > validatedCheckpoint.Epoch {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return ss.Slot > lastValidated.Slot, nil
|
||||
}
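When fork choice no longer tracks the root, IsOptimisticForRoot falls back to the DB: a state summary from a later epoch than the last validated checkpoint is reported as optimistic, one at least two epochs before it as validated, and anything in between is compared slot by slot against the checkpoint's own summary. A hedged caller sketch (blockRoot and the surrounding wiring are hypothetical):

optimistic, err := s.IsOptimisticForRoot(ctx, blockRoot)
if err != nil {
return err
}
if optimistic {
// The execution payload on this branch has not been fully validated yet,
// so data derived from it should be flagged as provisional.
log.Debug("Serving optimistic chain data")
}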
|
||||
|
||||
// SetGenesisTime sets the genesis time of beacon chain.
|
||||
func (s *Service) SetGenesisTime(t time.Time) {
|
||||
s.genesisTime = t
|
||||
}
|
||||
|
||||
// ForkChoiceStore returns the fork choice store in the service
|
||||
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
|
||||
return s.cfg.ForkChoiceStore
|
||||
}
|
||||
|
||||
@@ -16,10 +16,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
s.HeadSlot()
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
@@ -28,24 +31,32 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
_, err := s.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
|
||||
require.NoError(t, err)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
|
||||
head: &head{block: wsb},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
_, err := s.HeadBlock(context.Background())
|
||||
_, err = s.HeadBlock(context.Background())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
@@ -53,9 +64,12 @@ func TestHeadState_DataRace(t *testing.T) {
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
_, err := s.HeadState(context.Background())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
@@ -8,8 +8,10 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -39,12 +41,18 @@ func TestHeadRoot_Nil(t *testing.T) {
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
|
||||
}
|
||||
|
||||
func TestService_ForkChoiceStore(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
|
||||
p := c.ForkChoiceStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.finalizedCheckpt = cp
|
||||
c.store.SetFinalizedCheckpt(cp)
|
||||
|
||||
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
|
||||
}
|
||||
@@ -55,9 +63,9 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
genesisRoot := [32]byte{'A'}
|
||||
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.finalizedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.genesisRoot[:], c.FinalizedCheckpt().Root)
|
||||
c.store.SetFinalizedCheckpt(cp)
|
||||
c.originBlockRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
@@ -66,7 +74,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
|
||||
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c.justifiedCheckpt = cp
|
||||
c.store.SetJustifiedCheckpt(cp)
|
||||
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
|
||||
}
|
||||
|
||||
@@ -76,9 +84,9 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
genesisRoot := [32]byte{'B'}
|
||||
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c.justifiedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.genesisRoot[:], c.CurrentJustifiedCheckpt().Root)
|
||||
c.store.SetJustifiedCheckpt(cp)
|
||||
c.originBlockRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
@@ -87,7 +95,7 @@ func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
cp := &ethpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
|
||||
c.prevJustifiedCheckpt = cp
|
||||
c.store.SetPrevJustifiedCheckpt(cp)
|
||||
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
|
||||
}
|
||||
|
||||
@@ -97,9 +105,9 @@ func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
genesisRoot := [32]byte{'C'}
|
||||
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.prevJustifiedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.genesisRoot[:], c.PreviousJustifiedCheckpt().Root)
|
||||
c.store.SetPrevJustifiedCheckpt(cp)
|
||||
c.originBlockRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], c.PreviousJustifiedCheckpt().Root)
|
||||
}
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
@@ -125,7 +133,9 @@ func TestHeadRoot_UseDB(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
@@ -138,8 +148,10 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
b.Block.Slot = 1
|
||||
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
|
||||
c.head = &head{block: wsb, state: s}
|
||||
|
||||
recevied, err := c.HeadBlock(context.Background())
|
||||
require.NoError(t, err)
|
||||
@@ -184,15 +196,15 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
|
||||
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
|
||||
// Should not panic if head state is nil.
|
||||
c := &Service{}
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
|
||||
|
||||
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{state: s}
|
||||
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
|
||||
}
|
||||
|
||||
func TestHeadETH1Data_Nil(t *testing.T) {
|
||||
@@ -221,7 +233,9 @@ func TestIsCanonical_Ok(t *testing.T) {
|
||||
blk.Block.Slot = 0
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
|
||||
can, err := c.IsCanonical(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -264,51 +278,64 @@ func TestService_HeadSeed(t *testing.T) {
|
||||
require.DeepEqual(t, seed, root)
|
||||
}
|
||||
|
||||
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
|
||||
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
|
||||
c.head = &head{}
|
||||
root := c.HeadGenesisValidatorRoot()
|
||||
root := c.HeadGenesisValidatorsRoot()
|
||||
require.Equal(t, [32]byte{}, root)
|
||||
|
||||
c.head = &head{state: s}
|
||||
root = c.HeadGenesisValidatorRoot()
|
||||
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
|
||||
root = c.HeadGenesisValidatorsRoot()
|
||||
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
func TestService_ProtoArrayStore(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
p := c.ProtoArrayStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
}
|
||||
|
||||
func TestService_ChainHeads(t *testing.T) {
|
||||
func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0,
|
||||
params.BeaconConfig().ZeroHash)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
|
||||
}
|
||||
|
||||
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
rootMap := map[[32]byte]types.Slot{[32]byte{'c'}: 102, [32]byte{'d'}: 103, [32]byte{'e'}: 104}
|
||||
for i, root := range roots {
|
||||
slot, ok := rootMap[root]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, slot, slots[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
_, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
|
||||
_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
|
||||
require.Equal(t, false, e)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), bytesutil.ToBytes48(v.PublicKey))
|
||||
i, e := c.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(v.PublicKey))
|
||||
require.Equal(t, true, e)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
}
|
||||
@@ -317,12 +344,12 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = nil
|
||||
|
||||
idx, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
|
||||
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
|
||||
require.Equal(t, false, e)
|
||||
require.Equal(t, types.ValidatorIndex(0), idx)
|
||||
|
||||
c.head = &head{state: nil}
|
||||
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
|
||||
i, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
|
||||
require.Equal(t, false, e)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
}
|
||||
@@ -347,10 +374,160 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
|
||||
|
||||
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [48]byte{}, p)
|
||||
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
|
||||
|
||||
c.head = &head{state: nil}
|
||||
p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [48]byte{}, p)
|
||||
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
|
||||
}
|
||||
|
||||
func TestService_IsOptimistic_ProtoArray(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{genesisTime: time.Now()}
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 11
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
|
||||
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
|
||||
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
|
||||
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, validated)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 11
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
|
||||
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
|
||||
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
|
||||
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, validated)
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !develop
|
||||
// +build !develop
|
||||
|
||||
package blockchain
|
||||
|
||||
beacon-chain/blockchain/error.go (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
package blockchain

import "github.com/pkg/errors"

var (
// errNilJustifiedInStore is returned when a nil justified checkpoint is returned from store.
errNilJustifiedInStore = errors.New("nil justified checkpoint returned from store")
// errNilBestJustifiedInStore is returned when a nil best justified checkpoint is returned from store.
errNilBestJustifiedInStore = errors.New("nil best justified checkpoint returned from store")
// errNilFinalizedInStore is returned when a nil finalized checkpoint is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or
// block roots is used.
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
)
|
||||
@@ -10,10 +10,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
@@ -23,6 +24,21 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// UpdateHeadWithBalances updates the beacon state head after getting justified balances from the cache.
|
||||
func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
|
||||
cp := s.store.JustifiedCheckpt()
|
||||
if cp == nil {
|
||||
return errors.New("no justified checkpoint")
|
||||
}
|
||||
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(cp.Root))
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", cp.Root)
|
||||
return errors.Wrap(err, msg)
|
||||
}
|
||||
|
||||
return s.updateHead(ctx, balances)
|
||||
}
|
||||
|
||||
// This defines the current chain service's view of head.
|
||||
type head struct {
|
||||
slot types.Slot // current head slot.
|
||||
@@ -37,24 +53,20 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
|
||||
defer span.End()
|
||||
|
||||
// To get the proper head update, a node first checks whether its best justified checkpoint
// can become justified. This is designed to prevent bounce attacks and
// ensure the head gets its best justified info.
|
||||
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = s.bestJustifiedCheckpt
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Get head from the fork choice service.
|
||||
f := s.finalizedCheckpt
|
||||
j := s.justifiedCheckpt
|
||||
// To get head before the first justified epoch, the fork choice will start with genesis root
|
||||
f := s.store.FinalizedCheckpt()
|
||||
if f == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
j := s.store.JustifiedCheckpt()
|
||||
if j == nil {
|
||||
return errNilJustifiedInStore
|
||||
}
|
||||
// To get head before the first justified epoch, the fork choice will start with origin root
|
||||
// instead of zero hashes.
|
||||
headStartRoot := bytesutil.ToBytes32(j.Root)
|
||||
if headStartRoot == params.BeaconConfig().ZeroHash {
|
||||
headStartRoot = s.genesisRoot
|
||||
headStartRoot = s.originBlockRoot
|
||||
}
|
||||
|
||||
// In order to process head, fork choice store requires justified info.
|
||||
@@ -66,7 +78,11 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
||||
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
}
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -132,16 +148,21 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||
}).Debug("Chain reorg occurred")
|
||||
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
|
||||
isOptimistic, err := s.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||
}
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Reorg,
|
||||
Data: &ethpbv1.EventChainReorg{
|
||||
Slot: newHeadSlot,
|
||||
Depth: absoluteSlotDifference,
|
||||
OldHeadBlock: oldHeadRoot[:],
|
||||
NewHeadBlock: headRoot[:],
|
||||
OldHeadState: oldStateRoot,
|
||||
NewHeadState: newStateRoot,
|
||||
Epoch: slots.ToEpoch(newHeadSlot),
|
||||
Slot: newHeadSlot,
|
||||
Depth: absoluteSlotDifference,
|
||||
OldHeadBlock: oldHeadRoot[:],
|
||||
NewHeadBlock: headRoot[:],
|
||||
OldHeadState: oldStateRoot,
|
||||
NewHeadState: newStateRoot,
|
||||
Epoch: slots.ToEpoch(newHeadSlot),
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -163,7 +184,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
// Forward an event capturing a new chain head over a common event feed
|
||||
// done in a goroutine to avoid blocking the critical runtime main routine.
|
||||
go func() {
|
||||
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
|
||||
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not notify event feed of new chain head")
|
||||
}
|
||||
}()
|
||||
@@ -248,16 +269,16 @@ func (s *Service) headBlock() block.SignedBeaconBlock {
|
||||
// It does a full copy on head state for immutability.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headState(ctx context.Context) state.BeaconState {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
|
||||
_, span := trace.StartSpan(ctx, "blockChain.headState")
|
||||
defer span.End()
|
||||
|
||||
return s.head.state.Copy()
|
||||
}
|
||||
|
||||
// This returns the genesis validator root of the head state.
|
||||
// This returns the genesis validators root of the head state.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headGenesisValidatorRoot() [32]byte {
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
|
||||
func (s *Service) headGenesisValidatorsRoot() [32]byte {
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// This returns the validator referenced by the provided index in
|
||||
@@ -267,73 +288,30 @@ func (s *Service) headValidatorAtIndex(index types.ValidatorIndex) (state.ReadOn
|
||||
return s.head.state.ValidatorAtIndexReadOnly(index)
|
||||
}
|
||||
|
||||
// This returns the validator index referenced by the provided pubkey in
|
||||
// the head state.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
|
||||
return s.head.state.ValidatorIndexByPubkey(pubKey)
|
||||
}
|
||||
|
||||
// Returns true if head state exists.
|
||||
// This is the lock free version.
|
||||
func (s *Service) hasHeadState() bool {
|
||||
return s.head != nil && s.head.state != nil
|
||||
}
|
||||
|
||||
// This caches justified state balances to be used for fork choice.
|
||||
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
var justifiedState state.BeaconState
|
||||
var err error
|
||||
if justifiedRoot == s.genesisRoot {
|
||||
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if justifiedState == nil || justifiedState.IsNil() {
|
||||
return errors.New("justified state can't be nil")
|
||||
}
|
||||
|
||||
epoch := time.CurrentEpoch(justifiedState)
|
||||
|
||||
justifiedBalances := make([]uint64, justifiedState.NumValidators())
|
||||
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
|
||||
justifiedBalances[idx] = val.EffectiveBalance()
|
||||
} else {
|
||||
justifiedBalances[idx] = 0
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.justifiedBalancesLock.Lock()
|
||||
defer s.justifiedBalancesLock.Unlock()
|
||||
s.justifiedBalances = justifiedBalances
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) getJustifiedBalances() []uint64 {
|
||||
s.justifiedBalancesLock.RLock()
|
||||
defer s.justifiedBalancesLock.RUnlock()
|
||||
return s.justifiedBalances
|
||||
}
|
||||
|
||||
// Notifies a common event feed of a new chain head event. Called right after a new
|
||||
// chain head is determined, set, and saved to disk.
|
||||
func (s *Service) notifyNewHeadEvent(
|
||||
ctx context.Context,
|
||||
newHeadSlot types.Slot,
|
||||
newHeadState state.BeaconState,
|
||||
newHeadStateRoot,
|
||||
newHeadRoot []byte,
|
||||
) error {
|
||||
previousDutyDependentRoot := s.genesisRoot[:]
|
||||
currentDutyDependentRoot := s.genesisRoot[:]
|
||||
previousDutyDependentRoot := s.originBlockRoot[:]
|
||||
currentDutyDependentRoot := s.originBlockRoot[:]
|
||||
|
||||
var previousDutyEpoch types.Epoch
|
||||
currentDutyEpoch := slots.ToEpoch(newHeadSlot)
|
||||
@@ -360,6 +338,10 @@ func (s *Service) notifyNewHeadEvent(
|
||||
return errors.Wrap(err, "could not get duty dependent root")
|
||||
}
|
||||
}
|
||||
isOptimistic, err := s.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||
}
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.NewHead,
|
||||
Data: &ethpbv1.EventHead{
|
||||
@@ -369,6 +351,7 @@ func (s *Service) notifyNewHeadEvent(
|
||||
EpochTransition: slots.IsEpochStart(newHeadSlot),
|
||||
PreviousDutyDependentRoot: previousDutyDependentRoot,
|
||||
CurrentDutyDependentRoot: currentDutyDependentRoot,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
|
||||
@@ -130,7 +130,7 @@ func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, doma
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
|
||||
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// returns the head state advanced up to `slot`. It uses the `syncCommitteeHeadState` cache, keyed by `slot`.
|
||||
@@ -157,11 +157,9 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot
|
||||
if headState == nil || headState.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
if slot > headState.Slot() {
|
||||
headState, err = transition.ProcessSlots(ctx, headState, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
syncHeadStateMiss.Inc()
|
||||
err = syncCommitteeHeadStateCache.Put(slot, headState)
|
||||
|
||||
@@ -122,7 +122,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
|
||||
@@ -136,7 +136,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
|
||||
@@ -150,7 +150,7 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
|
||||
|
||||
@@ -39,9 +39,10 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
util.NewBeaconBlock()
|
||||
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
|
||||
util.NewBeaconBlock(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -55,7 +56,9 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
newHeadSignedBlock.Block.Slot = 1
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headState, err := util.NewBeaconState()
|
||||
@@ -80,9 +83,10 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
|
||||
util.NewBeaconBlock(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -98,7 +102,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headState, err := util.NewBeaconState()
|
||||
@@ -123,13 +129,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
state, _ := util.DeterministicGenesisState(t, 100)
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
||||
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
|
||||
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
|
||||
balances, err := service.justifiedBalances.get(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
|
||||
}
|
||||
|
||||
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
@@ -137,13 +145,15 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{}
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
|
||||
|
||||
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
|
||||
}
|
||||
@@ -156,11 +166,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: [32]byte{1},
|
||||
originBlockRoot: [32]byte{1},
|
||||
}
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
err := srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
@@ -172,8 +182,8 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: false,
|
||||
PreviousDutyDependentRoot: srv.genesisRoot[:],
|
||||
CurrentDutyDependentRoot: srv.genesisRoot[:],
|
||||
PreviousDutyDependentRoot: srv.originBlockRoot[:],
|
||||
CurrentDutyDependentRoot: srv.originBlockRoot[:],
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
})
|
||||
@@ -185,7 +195,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: genesisRoot,
|
||||
originBlockRoot: genesisRoot,
|
||||
}
|
||||
epoch1Start, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
@@ -195,7 +205,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
@@ -231,7 +241,9 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
|
||||
@@ -257,7 +269,9 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/emicklei/dot"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
)
|
||||
|
||||
const template = `<html>
|
||||
<head>
|
||||
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/viz.js"></script>
|
||||
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/full.render.js"></script>
|
||||
<body>
|
||||
<script type="application/javascript">
|
||||
var graph = ` + "`%s`;" + `
|
||||
var viz = new Viz();
|
||||
viz.renderSVGElement(graph) // reading the graph.
|
||||
.then(function(element) {
|
||||
document.body.appendChild(element); // appends to document.
|
||||
})
|
||||
.catch(error => {
|
||||
// Create a new Viz instance (@see Caveats page for more info)
|
||||
viz = new Viz();
|
||||
// Possibly display the error
|
||||
console.error(error);
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
</body>
|
||||
</html>`
|
||||
|
||||
// TreeHandler is a handler to serve /tree page in metrics.
|
||||
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
|
||||
headState, err := s.HeadState(r.Context())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head state")
|
||||
return
|
||||
}
|
||||
if headState == nil || headState.IsNil() {
|
||||
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
|
||||
log.WithError(err).Error("Failed to render p2p info page")
|
||||
}
|
||||
}
|
||||
|
||||
nodes := s.cfg.ForkChoiceStore.Nodes()
|
||||
|
||||
graph := dot.NewGraph(dot.Directed)
|
||||
graph.Attr("rankdir", "RL")
|
||||
graph.Attr("labeljust", "l")
|
||||
|
||||
dotNodes := make([]*dot.Node, len(nodes))
|
||||
avgBalance := uint64(averageBalance(headState.Balances()))
|
||||
|
||||
for i := len(nodes) - 1; i >= 0; i-- {
|
||||
// Construct label for each node.
|
||||
slot := fmt.Sprintf("%d", nodes[i].Slot())
|
||||
weight := fmt.Sprintf("%d", nodes[i].Weight()/1e9) // Convert unit Gwei to unit ETH.
|
||||
votes := fmt.Sprintf("%d", nodes[i].Weight()/1e9/avgBalance)
|
||||
index := fmt.Sprintf("%d", i)
|
||||
g := nodes[i].Graffiti()
|
||||
graffiti := hex.EncodeToString(g[:8])
|
||||
label := "slot: " + slot + "\n votes: " + votes + "\n weight: " + weight + "\n graffiti: " + graffiti
|
||||
var dotN dot.Node
|
||||
if nodes[i].Parent() != ^uint64(0) {
|
||||
dotN = graph.Node(index).Box().Attr("label", label)
|
||||
}
|
||||
|
||||
if nodes[i].Slot() == s.HeadSlot() &&
|
||||
nodes[i].BestDescendant() == ^uint64(0) &&
|
||||
nodes[i].Parent() != ^uint64(0) {
|
||||
dotN = dotN.Attr("color", "green")
|
||||
}
|
||||
|
||||
dotNodes[i] = &dotN
|
||||
}
|
||||
|
||||
for i := len(nodes) - 1; i >= 0; i-- {
|
||||
if nodes[i].Parent() != ^uint64(0) && nodes[i].Parent() < uint64(len(dotNodes)) {
|
||||
graph.Edge(*dotNodes[i], *dotNodes[nodes[i].Parent()])
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
if _, err := fmt.Fprintf(w, template, graph.String()); err != nil {
|
||||
log.WithError(err).Error("Failed to render p2p info page")
|
||||
}
|
||||
}
|
||||
|
||||
func averageBalance(balances []uint64) float64 {
|
||||
total := uint64(0)
|
||||
for i := 0; i < len(balances); i++ {
|
||||
total += balances[i]
|
||||
}
|
||||
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
|
||||
}
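For reference, the arithmetic this removed helper performed: it sums the raw Gwei balances, divides by the validator count and then by GweiPerEth, yielding the average in ETH. A quick sanity check with illustrative values:

// Two validators with 32 ETH and 31 ETH of effective balance, expressed in Gwei.
balances := []uint64{32 * params.BeaconConfig().GweiPerEth, 31 * params.BeaconConfig().GweiPerEth}
avg := averageBalance(balances) // (63e9 / 2) / 1e9 = 31.5
fmt.Println(avg) // prints 31.5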
|
||||
@@ -1,50 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func TestService_TreeHandler(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/tree", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
[32]byte{'a'},
|
||||
),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
|
||||
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler := http.HandlerFunc(s.TreeHandler)
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
assert.Equal(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
@@ -17,7 +18,7 @@ import (
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b block.BeaconBlock) {
|
||||
func logStateTransitionData(b block.BeaconBlock) error {
|
||||
log := log.WithField("slot", b.Slot())
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
@@ -34,13 +35,23 @@ func logStateTransitionData(b block.BeaconBlock) {
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() == version.Altair {
|
||||
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err == nil {
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
if b.Version() == version.Bellatrix {
|
||||
p, err := b.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
|
||||
log = log.WithField("txCount", len(p.Transactions))
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
|
||||
|
||||
@@ -3,6 +3,7 @@ package blockchain
|
||||
import (
|
||||
"testing"
|
||||
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
@@ -11,6 +12,17 @@ import (
|
||||
)
|
||||
|
||||
func Test_logStateTransitionData(t *testing.T) {
|
||||
payloadBlk := ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
SyncAggregate: ðpb.SyncAggregate{},
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
BlockHash: []byte{1, 2, 3},
|
||||
Transactions: [][]byte{{}, {}},
|
||||
},
|
||||
},
|
||||
}
|
||||
wrappedPayloadBlk, err := wrapper.WrappedBeaconBlock(payloadBlk)
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
b block.BeaconBlock
|
||||
@@ -55,11 +67,15 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has payload",
|
||||
b: wrappedPayloadBlk,
|
||||
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
hook := logTest.NewGlobal()
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
logStateTransitionData(tt.b)
|
||||
require.NoError(t, logStateTransitionData(tt.b))
|
||||
require.LogsContain(t, hook, tt.want)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -130,6 +130,14 @@ var (
|
||||
Name: "sync_head_state_hit",
|
||||
Help: "The number of sync head state requests that are present in the cache.",
|
||||
})
|
||||
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "state_balance_cache_hit",
|
||||
Help: "Count the number of state balance cache hits.",
|
||||
})
|
||||
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "state_balance_cache_miss",
|
||||
Help: "Count the number of state balance cache hits.",
|
||||
})
|
||||
)
|
||||
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
@@ -245,7 +253,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case version.Altair:
|
||||
case version.Altair, version.Bellatrix:
|
||||
v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
49
beacon-chain/blockchain/mock_engine_test.go
Normal file
49
beacon-chain/blockchain/mock_engine_test.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
|
||||
type mockEngineService struct {
|
||||
newPayloadError error
|
||||
forkchoiceError error
|
||||
blks map[[32]byte]*enginev1.ExecutionBlock
|
||||
}
|
||||
|
||||
func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
|
||||
return nil, m.newPayloadError
|
||||
}
|
||||
|
||||
func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
|
||||
return nil, nil, m.forkchoiceError
|
||||
}
|
||||
|
||||
func (*mockEngineService) GetPayloadV1(
|
||||
_ context.Context, _ enginev1.PayloadIDBytes,
|
||||
) *enginev1.ExecutionPayload {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
|
||||
blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found")
|
||||
}
|
||||
return blk, nil
|
||||
}
|
||||
50
beacon-chain/blockchain/mock_test.go
Normal file
50
beacon-chain/blockchain/mock_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
)
|
||||
|
||||
func testServiceOptsWithDB(t *testing.T) []Option {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
return []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
}
|
||||
|
||||
// warning: only use these opts when you are certain there are no db calls
|
||||
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
|
||||
// initialization requirements w/o the overhead of db init.
|
||||
func testServiceOptsNoDB() []Option {
|
||||
return []Option{
|
||||
withStateBalanceCache(satisfactoryStateBalanceCache()),
|
||||
}
|
||||
}
|
||||
|
||||
type mockStateByRooter struct {
|
||||
state state.BeaconState
|
||||
err error
|
||||
}
|
||||
|
||||
var _ stateByRooter = &mockStateByRooter{}
|
||||
|
||||
func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
|
||||
return m.state, m.err
|
||||
}
|
||||
|
||||
// returns an instance of the state balance cache that can be used
|
||||
// to satisfy the requirement for one in NewService, but which will
|
||||
// always return an error if used.
|
||||
func satisfactoryStateBalanceCache() *stateBalanceCache {
|
||||
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
|
||||
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
|
||||
}
|
||||
72
beacon-chain/blockchain/new_slot.go
Normal file
72
beacon-chain/blockchain/new_slot.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
// NewSlot mimics the implementation of `on_tick` in fork choice consensus spec.
|
||||
// It resets the proposer boost root in fork choice, and it updates store's justified checkpoint
|
||||
// if a better checkpoint on the store's finalized checkpoint chain.
|
||||
// This should only be called at the start of every slot interval.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// # Reset store.proposer_boost_root if this is a new slot
|
||||
// if current_slot > previous_slot:
|
||||
// store.proposer_boost_root = Root()
|
||||
//
|
||||
// # Not a new epoch, return
|
||||
// if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
|
||||
// return
|
||||
//
|
||||
// # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
|
||||
// if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
// ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
|
||||
// if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
|
||||
// store.justified_checkpoint = store.best_justified_checkpoint
|
||||
func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
|
||||
|
||||
// Reset proposer boost root in fork choice.
|
||||
if err := s.cfg.ForkChoiceStore.ResetBoostedProposerRoot(ctx); err != nil {
|
||||
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
|
||||
}
|
||||
|
||||
// Return if it's not a new epoch.
|
||||
if !slots.IsEpochStart(slot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
|
||||
bj := s.store.BestJustifiedCheckpt()
|
||||
if bj == nil {
|
||||
return errNilBestJustifiedInStore
|
||||
}
|
||||
j := s.store.JustifiedCheckpt()
|
||||
if j == nil {
|
||||
return errNilJustifiedInStore
|
||||
}
|
||||
f := s.store.FinalizedCheckpt()
|
||||
if f == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
if bj.Epoch > j.Epoch {
|
||||
finalizedSlot, err := slots.EpochStart(f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r, err := s.ancestor(ctx, bj.Root, finalizedSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bytes.Equal(r, f.Root) {
|
||||
s.store.SetJustifiedCheckpt(bj)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
100
beacon-chain/blockchain/new_slot_test.go
Normal file
100
beacon-chain/blockchain/new_slot_test.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestService_newSlot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)) // genesis
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)) // finalized
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)) // bad
|
||||
|
||||
type args struct {
|
||||
slot types.Slot
|
||||
finalized *ethpb.Checkpoint
|
||||
justified *ethpb.Checkpoint
|
||||
bestJustified *ethpb.Checkpoint
|
||||
shouldEqual bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
}{
|
||||
{
|
||||
name: "Not epoch boundary. No change",
|
||||
args: args{
|
||||
slot: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
finalized: ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
|
||||
justified: ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
|
||||
bestJustified: ðpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'c'}, 32)},
|
||||
shouldEqual: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Justified higher than best justified. No change",
|
||||
args: args{
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
finalized: ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
|
||||
justified: ðpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'b'}, 32)},
|
||||
bestJustified: ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'c'}, 32)},
|
||||
shouldEqual: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Best justified not on the same chain as finalized. No change",
|
||||
args: args{
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
finalized: ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
|
||||
justified: ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
|
||||
bestJustified: ðpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'d'}, 32)},
|
||||
shouldEqual: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Best justified on the same chain as finalized. Yes change",
|
||||
args: args{
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
finalized: ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
|
||||
justified: ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
|
||||
bestJustified: ðpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'c'}, 32)},
|
||||
shouldEqual: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
store := store.New(test.args.justified, test.args.finalized)
|
||||
store.SetBestJustifiedCheckpt(test.args.bestJustified)
|
||||
service.store = store
|
||||
|
||||
require.NoError(t, service.NewSlot(ctx, test.args.slot))
|
||||
if test.args.shouldEqual {
|
||||
require.DeepSSZEqual(t, service.store.BestJustifiedCheckpt(), service.store.JustifiedCheckpt())
|
||||
} else {
|
||||
require.DeepNotSSZEqual(t, service.store.BestJustifiedCheckpt(), service.store.JustifiedCheckpt())
|
||||
}
|
||||
}
|
||||
}
|
||||
189
beacon-chain/blockchain/optimistic_sync.go
Normal file
189
beacon-chain/blockchain/optimistic_sync.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
|
||||
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
|
||||
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
|
||||
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
|
||||
defer span.End()
|
||||
|
||||
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
|
||||
return nil, errors.New("nil head block")
|
||||
}
|
||||
// Must not call fork choice updated until the transition conditions are met on the Pow network.
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not determine if block is execution block")
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil, nil
|
||||
}
|
||||
headPayload, err := headBlk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
var finalizedHash []byte
|
||||
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
|
||||
finalizedHash = params.BeaconConfig().ZeroHash[:]
|
||||
} else {
|
||||
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get finalized block execution payload")
|
||||
}
|
||||
finalizedHash = payload.BlockHash
|
||||
}
|
||||
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash,
|
||||
SafeBlockHash: headPayload.BlockHash,
|
||||
FinalizedBlockHash: finalizedHash,
|
||||
}
|
||||
|
||||
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
|
||||
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case v1.ErrAcceptedSyncingPayloadStatus:
|
||||
log.WithFields(logrus.Fields{
|
||||
"headSlot": headBlk.Slot(),
|
||||
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
|
||||
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
|
||||
}).Info("Called fork choice updated with optimistic block")
|
||||
return payloadID, nil
|
||||
default:
|
||||
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
|
||||
}
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set block to valid")
|
||||
}
|
||||
return payloadID, nil
|
||||
}
|
||||
|
||||
// notifyForkchoiceUpdate signals execution engine on a new payload
|
||||
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
|
||||
preStateHeader, postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock, root [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
|
||||
defer span.End()
|
||||
|
||||
// Execution payload is only supported in Bellatrix and beyond. Pre
|
||||
// merge blocks are never optimistic
|
||||
if blocks.IsPreBellatrixVersion(postStateVersion) {
|
||||
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
|
||||
}
|
||||
if err := helpers.BeaconBlockIsNil(blk); err != nil {
|
||||
return err
|
||||
}
|
||||
body := blk.Block().Body()
|
||||
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if execution is enabled")
|
||||
}
|
||||
if !enabled {
|
||||
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
|
||||
}
|
||||
payload, err := body.ExecutionPayload()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case v1.ErrAcceptedSyncingPayloadStatus:
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blk.Block().Slot(),
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
return nil
|
||||
default:
|
||||
return errors.Wrap(err, "could not validate execution payload from execution engine")
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "could not set optimistic status")
|
||||
}
|
||||
|
||||
// During the transition event, the transition block should be verified for sanity.
|
||||
if blocks.IsPreBellatrixVersion(preStateVersion) {
|
||||
// Handle case where pre-state is Altair but block contains payload.
|
||||
// To reach here, the block must have contained a valid payload.
|
||||
return s.validateMergeBlock(ctx, blk)
|
||||
}
|
||||
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(preStateHeader, body)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if merge block is terminal")
|
||||
}
|
||||
if !atTransition {
|
||||
return nil
|
||||
}
|
||||
return s.validateMergeBlock(ctx, blk)
|
||||
}
|
||||
|
||||
// optimisticCandidateBlock returns true if this block can be optimistically synced.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
|
||||
// if is_execution_block(opt_store.blocks[block.parent_root]):
|
||||
// return True
|
||||
//
|
||||
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
|
||||
// if is_execution_block(opt_store.blocks[justified_root]):
|
||||
// return True
|
||||
//
|
||||
// if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
|
||||
// return True
|
||||
//
|
||||
// return False
|
||||
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.BeaconBlock) (bool, error) {
|
||||
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if parent == nil {
|
||||
return false, errNilParentInDB
|
||||
}
|
||||
|
||||
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if parentIsExecutionBlock {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
j := s.store.JustifiedCheckpt()
|
||||
if j == nil {
|
||||
return false, errNilJustifiedInStore
|
||||
}
|
||||
jBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(j.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return blocks.IsExecutionBlock(jBlock.Block().Body())
|
||||
}
|
||||
692
beacon-chain/blockchain/optimistic_sync_test.go
Normal file
692
beacon-chain/blockchain/optimistic_sync_test.go
Normal file
@@ -0,0 +1,692 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
altairBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockAltair())
|
||||
require.NoError(t, err)
|
||||
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
require.NoError(t, err)
|
||||
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blk block.BeaconBlock
|
||||
finalizedRoot [32]byte
|
||||
newForkchoiceErr error
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil block",
|
||||
errString: "nil head block",
|
||||
},
|
||||
{
|
||||
name: "phase0 block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "altair block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "not execution block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "happy case: finalized root is altair block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
finalizedRoot: altairBlkRoot,
|
||||
},
|
||||
{
|
||||
name: "happy case: finalized root is bellatrix block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
finalizedRoot: bellatrixBlkRoot,
|
||||
},
|
||||
{
|
||||
name: "forkchoice updated with optimistic block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
|
||||
finalizedRoot: bellatrixBlkRoot,
|
||||
},
|
||||
{
|
||||
name: "forkchoice updated with invalid block",
|
||||
blk: func() block.BeaconBlock {
|
||||
b, err := wrapper.WrappedBeaconBlock(ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
|
||||
finalizedRoot: bellatrixBlkRoot,
|
||||
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_NotifyNewPayload(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = "2"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
phase0State, _ := util.DeterministicGenesisState(t, 1)
|
||||
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
|
||||
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{},
|
||||
},
|
||||
},
|
||||
}
|
||||
a := ðpb.SignedBeaconBlockAltair{
|
||||
Block: ðpb.BeaconBlockAltair{
|
||||
Body: ðpb.BeaconBlockBodyAltair{},
|
||||
},
|
||||
}
|
||||
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
preState state.BeaconState
|
||||
postState state.BeaconState
|
||||
blk block.SignedBeaconBlock
|
||||
newPayloadErr error
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "phase 0 post state",
|
||||
postState: phase0State,
|
||||
preState: phase0State,
|
||||
},
|
||||
{
|
||||
name: "altair post state",
|
||||
postState: altairState,
|
||||
preState: altairState,
|
||||
},
|
||||
{
|
||||
name: "nil beacon block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
errString: "signed beacon block can't be nil",
|
||||
},
|
||||
{
|
||||
name: "new payload with optimistic block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: engine.ErrAcceptedSyncingPayloadStatus,
|
||||
},
|
||||
{
|
||||
name: "new payload with invalid block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: engine.ErrInvalidPayloadStatus,
|
||||
errString: "could not validate execution payload from execution engine: payload status is INVALID",
|
||||
},
|
||||
{
|
||||
name: "altair pre state, altair block",
|
||||
postState: bellatrixState,
|
||||
preState: altairState,
|
||||
blk: altairBlk,
|
||||
},
|
||||
{
|
||||
name: "altair pre state, happy case",
|
||||
postState: bellatrixState,
|
||||
preState: altairState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
},
|
||||
{
|
||||
name: "not at merge transition",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
},
|
||||
{
|
||||
name: "happy case",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
var payload *ethpb.ExecutionPayloadHeader
|
||||
if tt.preState.Version() == version.Bellatrix {
|
||||
payload, err = tt.preState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
root := [32]byte{'a'}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
|
||||
require.NoError(t, err)
|
||||
err = service.notifyNewPayload(ctx, tt.preState.Version(), postVersion, payload, postHeader, tt.blk, root)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = "2"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
engine := &mockEngineService{blks: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
payload, err := bellatrixState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
root := [32]byte{'c'}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 1, root, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
|
||||
require.NoError(t, err)
|
||||
err = service.notifyNewPayload(ctx, bellatrixState.Version(), postVersion, payload, postHeader, bellatrixBlk, root)
|
||||
require.NoError(t, err)
|
||||
optimistic, err := service.IsOptimisticForRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
}
|
||||
|
||||
func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
|
||||
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
|
||||
|
||||
parentBlk := util.NewBeaconBlockBellatrix()
|
||||
wrappedParentBlock, err := wrapper.WrappedSignedBeaconBlock(parentBlk)
|
||||
require.NoError(t, err)
|
||||
parentRoot, err := wrappedParentBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blk block.BeaconBlock
|
||||
justified block.SignedBeaconBlock
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "deep block",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 1
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "shallow block, Altair justified chkpt",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockAltair()
|
||||
blk.Block.Slot = 200
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockAltair()
|
||||
blk.Block.Slot = 32
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "shallow block, Bellatrix justified chkpt without execution",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 200
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "shallow block, execution enabled justified chkpt",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 200
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
blk.Block.Body.ExecutionPayload.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
jroot, err := tt.justified.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
|
||||
service.store.SetJustifiedCheckpt(
|
||||
ðpb.Checkpoint{
|
||||
Root: jroot[:],
|
||||
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
|
||||
})
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
|
||||
|
||||
candidate, err := service.optimisticCandidateBlock(ctx, tt.blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, candidate, tt.name)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
|
||||
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
|
||||
payload := &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, 32),
|
||||
BlockNumber: 100,
|
||||
}
|
||||
body := ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: payload}
|
||||
block := ðpb.BeaconBlockBellatrix{Body: body, Slot: 200}
|
||||
rawSigned := ðpb.SignedBeaconBlockBellatrix{Block: block}
|
||||
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
|
||||
blkRoot, err := wr.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
childBlock := util.NewBeaconBlockBellatrix()
|
||||
childBlock.Block.ParentRoot = blkRoot[:]
|
||||
// shallow block
|
||||
childBlock.Block.Slot = 201
|
||||
wrappedChild, err := wrapper.WrappedSignedBeaconBlock(childBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedChild))
|
||||
candidate, err := service.optimisticCandidateBlock(ctx, wrappedChild.Block())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, candidate)
|
||||
}
|
||||
|
||||
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
stateGen := stategen.New(beaconDB)
|
||||
fcs := protoarray.New(0, 0, [32]byte{})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stateGen),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 0, 0))
|
||||
genesisSummary := ðpb.StateSummary{
|
||||
Root: genesisStateRoot[:],
|
||||
Slot: 0,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, genesisSummary))
|
||||
|
||||
// Get last validated checkpoint
|
||||
origCheckpoint, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, origCheckpoint))
|
||||
|
||||
// Optimistic finalized checkpoint
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 320
|
||||
blk.Block.ParentRoot = genesisRoot[:]
|
||||
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
opRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
opCheckpoint := ðpb.Checkpoint{
|
||||
Root: opRoot[:],
|
||||
Epoch: 10,
|
||||
}
|
||||
opStateSummary := ðpb.StateSummary{
|
||||
Root: opRoot[:],
|
||||
Slot: 320,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 320, opRoot, genesisRoot,
|
||||
params.BeaconConfig().ZeroHash, 10, 10))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, opCheckpoint))
|
||||
cp, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, origCheckpoint.Root, cp.Root)
|
||||
require.Equal(t, origCheckpoint.Epoch, cp.Epoch)
|
||||
|
||||
// Validated finalized checkpoint
|
||||
blk = util.NewBeaconBlock()
|
||||
blk.Block.Slot = 640
|
||||
blk.Block.ParentRoot = opRoot[:]
|
||||
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
validRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
validCheckpoint := ðpb.Checkpoint{
|
||||
Root: validRoot[:],
|
||||
Epoch: 20,
|
||||
}
|
||||
validSummary := ðpb.StateSummary{
|
||||
Root: validRoot[:],
|
||||
Slot: 640,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 640, validRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 20, 20))
|
||||
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, validCheckpoint))
|
||||
cp, err = service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
optimistic, err := service.IsOptimisticForRoot(ctx, validRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
|
||||
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -50,6 +51,14 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithExecutionEngineCaller to call execution engine.
|
||||
func WithExecutionEngineCaller(c enginev1.Caller) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ExecutionEngineCaller = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDepositCache for deposit lifecycle after chain inclusion.
|
||||
func WithDepositCache(c *depositcache.DepositCache) Option {
|
||||
return func(s *Service) error {
|
||||
@@ -130,6 +139,13 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func withStateBalanceCache(c *stateBalanceCache) Option {
|
||||
return func(s *Service) error {
|
||||
s.justifiedBalances = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithFinalizedStateAtStartUp to store finalized state at start up.
|
||||
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
|
||||
return func(s *Service) error {
|
||||
|
||||
141
beacon-chain/blockchain/pow_block.go
Normal file
141
beacon-chain/blockchain/pow_block.go
Normal file
@@ -0,0 +1,141 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
|
||||
//
|
||||
// def validate_merge_block(block: BeaconBlock) -> None:
|
||||
// if TERMINAL_BLOCK_HASH != Hash32():
|
||||
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
|
||||
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
|
||||
// return
|
||||
//
|
||||
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
|
||||
// # Check if `pow_block` is available
|
||||
// assert pow_block is not None
|
||||
// pow_parent = get_pow_block(pow_block.parent_hash)
|
||||
// # Check if `pow_parent` is available
|
||||
// assert pow_parent is not None
|
||||
// # Check if `pow_block` is a valid terminal PoW block
|
||||
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
|
||||
func (s *Service) validateMergeBlock(ctx context.Context, b block.SignedBeaconBlock) error {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return err
|
||||
}
|
||||
payload, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload == nil {
|
||||
return errors.New("nil execution payload")
|
||||
}
|
||||
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
|
||||
return errors.Wrap(err, "could not validate terminal block hash")
|
||||
}
|
||||
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
|
||||
}
|
||||
_, mergeBlockParentTD, err := s.getBlkParentHashAndTD(ctx, mergeBlockParentHash)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get merge parent block total difficulty")
|
||||
}
|
||||
valid, err := validateTerminalBlockDifficulties(mergeBlockTD, mergeBlockParentTD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
|
||||
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Block().Slot(),
|
||||
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
|
||||
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
|
||||
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
|
||||
"mergeBlockTotalDifficulty": mergeBlockTD,
|
||||
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
|
||||
}).Info("Validated terminal block")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
|
||||
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
|
||||
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get pow block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, nil, errors.New("pow block is nil")
|
||||
}
|
||||
blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")
|
||||
}
|
||||
blkTDUint256, overflows := uint256.FromBig(blkTDBig)
|
||||
if overflows {
|
||||
return nil, nil, errors.New("total difficulty overflows")
|
||||
}
|
||||
return blk.ParentHash, blkTDUint256, nil
|
||||
}
|
||||
|
||||
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
//    if TERMINAL_BLOCK_HASH != Hash32():
//        # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
//        assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
//        assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
//        return
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
	if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
		return nil
	}
	if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
		return errors.New("terminal block hash activation epoch not reached")
	}
	if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
		return errors.New("parent hash does not match terminal block hash")
	}
	return nil
}

// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
//
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
//    is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
//    is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
//    return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
	b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
	if !ok {
		return false, errors.New("failed to parse terminal total difficulty")
	}
	ttd, of := uint256.FromBig(b)
	if of {
		return false, errors.New("overflow terminal total difficulty")
	}
	totalDifficultyReached := currentDifficulty.Cmp(ttd) >= 0
	parentTotalDifficultyValid := ttd.Cmp(parentDifficulty) > 0
	return totalDifficultyReached && parentTotalDifficultyValid, nil
}
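Worked example (illustrative, not part of the diff): with the terminal total difficulty configured to 2, a block whose own total difficulty is 2 and whose parent's is 1 is the terminal PoW block, because it is the first block to reach the TTD. Using only the uint256 package already imported here:

	ttd := uint256.NewInt(2)
	current, parent := uint256.NewInt(2), uint256.NewInt(1)
	isTerminal := current.Cmp(ttd) >= 0 && parent.Cmp(ttd) < 0 // true: 2 >= 2 and 1 < 2
	_ = isTerminal // a parent already at 2 would make this false, as the table-driven test below checks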
beacon-chain/blockchain/pow_block_test.go (new file, 210 lines)
@@ -0,0 +1,210 @@
package blockchain

import (
	"context"
	"fmt"
	"math/big"
	"testing"

	"github.com/holiman/uint256"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
)

func Test_validTerminalPowBlock(t *testing.T) {
	tests := []struct {
		name              string
		currentDifficulty *uint256.Int
		parentDifficulty  *uint256.Int
		ttd               uint64
		want              bool
	}{
		{
			name:              "current > ttd, parent > ttd",
			currentDifficulty: uint256.NewInt(2),
			parentDifficulty:  uint256.NewInt(2),
			ttd:               1,
			want:              false,
		},
		{
			name:              "current < ttd, parent < ttd",
			currentDifficulty: uint256.NewInt(2),
			parentDifficulty:  uint256.NewInt(2),
			ttd:               3,
			want:              false,
		},
		{
			name:              "current == ttd, parent == ttd",
			currentDifficulty: uint256.NewInt(2),
			parentDifficulty:  uint256.NewInt(2),
			ttd:               2,
			want:              false,
		},
		{
			name:              "current > ttd, parent == ttd",
			currentDifficulty: uint256.NewInt(2),
			parentDifficulty:  uint256.NewInt(1),
			ttd:               1,
			want:              false,
		},
		{
			name:              "current == ttd, parent < ttd",
			currentDifficulty: uint256.NewInt(2),
			parentDifficulty:  uint256.NewInt(1),
			ttd:               2,
			want:              true,
		},
		{
			name:              "current > ttd, parent < ttd",
			currentDifficulty: uint256.NewInt(3),
			parentDifficulty:  uint256.NewInt(1),
			ttd:               2,
			want:              true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := params.BeaconConfig()
			cfg.TerminalTotalDifficulty = fmt.Sprint(tt.ttd)
			params.OverrideBeaconConfig(cfg)
			got, err := validateTerminalBlockDifficulties(tt.currentDifficulty, tt.parentDifficulty)
			require.NoError(t, err)
			if got != tt.want {
				t.Errorf("validateTerminalBlockDifficulties() = %v, want %v", got, tt.want)
			}
		})
	}
}

func Test_validTerminalPowBlockSpecConfig(t *testing.T) {
	cfg := params.BeaconConfig()
	cfg.TerminalTotalDifficulty = "115792089237316195423570985008687907853269984665640564039457584007913129638912"
	params.OverrideBeaconConfig(cfg)

	i, _ := new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129638912", 10)
	current, of := uint256.FromBig(i)
	require.Equal(t, of, false)
	i, _ = new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129638911", 10)
	parent, of := uint256.FromBig(i)
	require.Equal(t, of, false)

	got, err := validateTerminalBlockDifficulties(current, parent)
	require.NoError(t, err)
	require.Equal(t, true, got)
}

func Test_validateMergeBlock(t *testing.T) {
	cfg := params.BeaconConfig()
	cfg.TerminalTotalDifficulty = "2"
	params.OverrideBeaconConfig(cfg)

	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	fcs := protoarray.New(0, 0, [32]byte{'a'})
	opts := []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),
		WithForkChoiceStore(fcs),
	}
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)

	engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
	service.cfg.ExecutionEngineCaller = engine
	engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
		ParentHash:      bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
		TotalDifficulty: "0x2",
	}
	engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
		ParentHash:      bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
		TotalDifficulty: "0x1",
	}
	blk := &ethpb.SignedBeaconBlockBellatrix{
		Block: &ethpb.BeaconBlockBellatrix{
			Slot: 1,
			Body: &ethpb.BeaconBlockBodyBellatrix{
				ExecutionPayload: &enginev1.ExecutionPayload{
					ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
				},
			},
		},
	}
	b, err := wrapper.WrappedSignedBeaconBlock(blk)
	require.NoError(t, err)
	require.NoError(t, service.validateMergeBlock(ctx, b))

	cfg.TerminalTotalDifficulty = "1"
	params.OverrideBeaconConfig(cfg)
	require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", service.validateMergeBlock(ctx, b))
}

func Test_getBlkParentHashAndTD(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	fcs := protoarray.New(0, 0, [32]byte{'a'})
	opts := []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),
		WithForkChoiceStore(fcs),
	}
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)

	engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
	service.cfg.ExecutionEngineCaller = engine
	h := [32]byte{'a'}
	p := [32]byte{'b'}
	td := "0x1"
	engine.blks[h] = &enginev1.ExecutionBlock{
		ParentHash:      p[:],
		TotalDifficulty: td,
	}
	parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
	require.NoError(t, err)
	require.Equal(t, p, bytesutil.ToBytes32(parentHash))
	require.Equal(t, td, totalDifficulty.String())

	_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
	require.ErrorContains(t, "could not get pow block: block not found", err)

	engine.blks[h] = nil
	_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
	require.ErrorContains(t, "pow block is nil", err)

	engine.blks[h] = &enginev1.ExecutionBlock{
		ParentHash:      p[:],
		TotalDifficulty: "1",
	}
	_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
	require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)

	engine.blks[h] = &enginev1.ExecutionBlock{
		ParentHash:      p[:],
		TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
	}
	_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
	require.ErrorContains(t, "could not decode merge block total difficulty: hex number > 256 bits", err)
}

func Test_validateTerminalBlockHash(t *testing.T) {
	require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))

	cfg := params.BeaconConfig()
	cfg.TerminalBlockHash = [32]byte{0x01}
	params.OverrideBeaconConfig(cfg)
	require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))

	cfg.TerminalBlockHashActivationEpoch = 0
	params.OverrideBeaconConfig(cfg)
	require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))

	require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
}
@@ -2,6 +2,7 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -9,12 +10,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// onAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
|
||||
// OnAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
|
||||
// it to the DB. As a stateless function, this does not hold nor delay attestation based on the spec descriptions.
|
||||
// The delay is handled by the caller in `processAttestations`.
|
||||
//
|
||||
@@ -36,7 +36,7 @@ import (
|
||||
//
|
||||
// # Update latest messages for attesting indices
|
||||
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
|
||||
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error {
|
||||
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
@@ -59,10 +59,10 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
|
||||
return err
|
||||
}
|
||||
|
||||
genesisTime := baseState.GenesisTime()
|
||||
genesisTime := uint64(s.genesisTime.Unix())
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
|
||||
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -42,11 +42,9 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if epochStartSlot > baseState.Slot() {
|
||||
baseState, err = transition.ProcessSlots(ctx, baseState, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
|
||||
}
|
||||
baseState, err = transition.ProcessSlotsIfPossible(ctx, baseState, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
|
||||
}
|
||||
|
||||
// Sharing the same state across caches is perfectly fine here, the fetching
|
||||
@@ -61,7 +59,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
|
||||
}
|
||||
|
||||
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
|
||||
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
var prevEpoch types.Epoch
|
||||
|
||||
@@ -3,12 +3,15 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -16,35 +19,37 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithOutState := util.NewBeaconBlock()
|
||||
BlkWithOutState.Block.Slot = 0
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithStateBadAtt := util.NewBeaconBlock()
|
||||
BlkWithStateBadAtt.Block.Slot = 1
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -55,7 +60,9 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
|
||||
BlkWithValidState := util.NewBeaconBlock()
|
||||
BlkWithValidState.Block.Slot = 2
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -104,9 +111,9 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
name: "process nil field (a.Target) in attestation",
|
||||
a: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
Target: nil,
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
},
|
||||
AggregationBits: make([]byte, 1),
|
||||
Signature: make([]byte, 96),
|
||||
@@ -117,7 +124,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := service.onAttestation(ctx, tt.a)
|
||||
err := service.OnAttestation(ctx, tt.a)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
@@ -127,20 +134,132 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(doublylinkedtree.New(0, 0)),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithOutState := util.NewBeaconBlock()
|
||||
BlkWithOutState.Block.Slot = 0
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithStateBadAtt := util.NewBeaconBlock()
|
||||
BlkWithStateBadAtt.Block.Slot = 1
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
||||
|
||||
BlkWithValidState := util.NewBeaconBlock()
|
||||
BlkWithValidState.Block.Slot = 2
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s, err = util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFork(ðpb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "attestation's data slot not aligned with target vote",
|
||||
a: util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}),
|
||||
wantedErr: "slot 32 does not match target epoch 0",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
|
||||
wantedErr: "could not get pre state for epoch 0",
|
||||
},
|
||||
{
|
||||
name: "process attestation doesn't match current epoch",
|
||||
a: util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}}),
|
||||
wantedErr: "target epoch 100 does not match current epoch",
|
||||
},
|
||||
{
|
||||
name: "process nil attestation",
|
||||
a: nil,
|
||||
wantedErr: "attestation can't be nil",
|
||||
},
|
||||
{
|
||||
name: "process nil field (a.Data) in attestation",
|
||||
a: ðpb.Attestation{},
|
||||
wantedErr: "attestation's data can't be nil",
|
||||
},
|
||||
{
|
||||
name: "process nil field (a.Target) in attestation",
|
||||
a: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
Target: nil,
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
},
|
||||
AggregationBits: make([]byte, 1),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
wantedErr: "attestation's target can't be nil",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := service.OnAttestation(ctx, tt.a)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
@@ -149,29 +268,54 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.onAttestation(ctx, att[0]))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFinalizedCheckpoint(ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)})
|
||||
err = s.SetFinalizedCheckpoint(ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)})
|
||||
require.NoError(t, err)
|
||||
val := ðpb.Validator{
|
||||
PublicKey: bytesutil.PadTo([]byte("foo"), 48),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte("bar"), 32),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte("bar"), fieldparams.RootLength),
|
||||
}
|
||||
err = s.SetValidators([]*ethpb.Validator{val})
|
||||
require.NoError(t, err)
|
||||
@@ -180,23 +324,23 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
r := [32]byte{'g'}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
|
||||
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.store.SetJustifiedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
service.store.SetBestJustifiedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
service.store.SetPrevFinalizedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
|
||||
r = bytesutil.ToBytes32([]byte{'A'})
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
|
||||
|
||||
s1, err := service.getAttPreState(ctx, cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
|
||||
s2, err := service.getAttPreState(ctx, cp2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||
@@ -214,13 +358,13 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||
|
||||
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
|
||||
service.store.SetJustifiedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
service.store.SetBestJustifiedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
service.store.SetPrevFinalizedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
|
||||
s3, err := service.getAttPreState(ctx, cp3)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
|
||||
@@ -230,17 +374,16 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
epoch := types.Epoch(1)
|
||||
baseState, _ := util.DeterministicGenesisState(t, 1)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -251,7 +394,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||
|
||||
epoch = 2
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), 32)}
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -268,52 +411,31 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)})
|
||||
err := verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
|
||||
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
d := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
|
||||
@@ -321,16 +443,16 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||
@@ -340,16 +462,16 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||
@@ -357,27 +479,69 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
|
||||
}
|
||||
|
||||
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1}
|
||||
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 1})
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.ErrorContains(t, "Root and finalized store are not consistent", err)
|
||||
}
|
||||
|
||||
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 1})
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -387,25 +551,27 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
|
||||
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: r32[:]}
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: r32[:], Epoch: 1})
|
||||
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -415,19 +581,17 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
|
||||
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: r32[:]}
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: r32[:], Epoch: 1})
|
||||
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
@@ -435,8 +599,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,11 +6,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -21,6 +23,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -98,14 +101,36 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
return err
|
||||
}
|
||||
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not verify new payload")
|
||||
}
|
||||
|
||||
	// We add a proposer score boost to fork choice for the block root if applicable, right after
	// running a successful state transition for the block.
	secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
	if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
		BlockRoot:       blockRoot,
		BlockSlot:       signed.Block().Slot(),
		CurrentSlot:     slots.SinceGenesis(s.genesisTime),
		SecondsIntoSlot: secondsIntoSlot,
	}); err != nil {
		return err
	}
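For illustration only (hypothetical numbers, not part of the diff): with 12-second slots, a block processed 1203 seconds after genesis lands in slot 100 and arrives 3 seconds into that slot, which is the value handed to the proposer boost arguments above.

	secondsSinceGenesis := uint64(1203) // hypothetical elapsed time since genesis
	secondsPerSlot := uint64(12)        // mainnet SecondsPerSlot
	currentSlot := secondsSinceGenesis / secondsPerSlot     // 100, what slots.SinceGenesis reports
	secondsIntoSlot := secondsSinceGenesis % secondsPerSlot // 3
	_, _ = currentSlot, secondsIntoSlot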
|
||||
|
||||
// If slasher is configured, forward the attestations in the block via
|
||||
// an event feed for processing.
|
||||
@@ -135,25 +160,40 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
currJustifiedEpoch := s.justifiedCheckpt.Epoch
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
if justified == nil {
|
||||
return errNilJustifiedInStore
|
||||
}
|
||||
currJustifiedEpoch := justified.Epoch
|
||||
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
|
||||
if err := s.updateJustified(ctx, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
|
||||
if newFinalized {
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint()
|
||||
s.store.SetPrevFinalizedCheckpt(finalized)
|
||||
s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
|
||||
s.store.SetPrevJustifiedCheckpt(justified)
|
||||
s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
|
||||
}
|
||||
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
|
||||
return errors.Wrap(err, msg)
|
||||
}
|
||||
if err := s.updateHead(ctx, balances); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
|
||||
return err
|
||||
@@ -171,18 +211,16 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
})
|
||||
|
||||
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
|
||||
if features.Get().EnableNextSlotStateCache {
|
||||
go func() {
|
||||
// Use a custom deadline here, since this method runs asynchronously.
|
||||
// We ignore the parent method's context and instead create a new one
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
// Use a custom deadline here, since this method runs asynchronously.
|
||||
// We ignore the parent method's context and instead create a new one
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Save justified check point to db.
|
||||
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
|
||||
@@ -200,14 +238,19 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not prune proto array fork choice nodes")
|
||||
}
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||
}
|
||||
go func() {
|
||||
// Send an event regarding the new finalized checkpoint over a common event feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.FinalizedCheckpoint,
|
||||
Data: ðpbv1.EventFinalizedCheckpoint{
|
||||
Epoch: postState.FinalizedCheckpoint().Epoch,
|
||||
Block: postState.FinalizedCheckpoint().Root,
|
||||
State: signed.Block().StateRoot(),
|
||||
Epoch: postState.FinalizedCheckpoint().Epoch,
|
||||
Block: postState.FinalizedCheckpoint().Root,
|
||||
State: signed.Block().StateRoot(),
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -228,6 +271,24 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
|
||||
|
||||
func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
	if st == nil {
		return 0, nil, errors.New("nil state")
	}
	var preStateHeader *ethpb.ExecutionPayloadHeader
	var err error
	preStateVersion := st.Version()
	switch preStateVersion {
	case version.Phase0, version.Altair:
	default:
		preStateHeader, err = st.LatestExecutionPayloadHeader()
		if err != nil {
			return 0, nil, err
		}
	}
	return preStateVersion, preStateHeader, nil
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
@@ -236,6 +297,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
return nil, nil, errors.New("no blocks provided")
|
||||
}
|
||||
|
||||
if len(blks) != len(blockRoots) {
|
||||
return nil, nil, errWrongBlockCount
|
||||
}
|
||||
|
||||
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -255,14 +321,29 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
sigSet := &bls.SignatureSet{
|
||||
sigSet := &bls.SignatureBatch{
|
||||
Signatures: [][]byte{},
|
||||
PublicKeys: []bls.PublicKey{},
|
||||
Messages: [][32]byte{},
|
||||
}
|
||||
var set *bls.SignatureSet
|
||||
type versionAndHeader struct {
|
||||
version int
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
}
|
||||
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
|
||||
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
|
||||
var set *bls.SignatureBatch
|
||||
boundaries := make(map[[32]byte]state.BeaconState)
|
||||
for i, b := range blks {
|
||||
v, h, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
preVersionAndHeaders[i] = &versionAndHeader{
|
||||
version: v,
|
||||
header: h,
|
||||
}
|
||||
|
||||
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -270,12 +351,18 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
boundaries[blockRoots[i]] = preState.Copy()
|
||||
if err := s.handleEpochBoundary(ctx, preState); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
|
||||
}
|
||||
}
|
||||
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
|
||||
fCheckpoints[i] = preState.FinalizedCheckpoint()
|
||||
|
||||
v, h, err = getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
postVersionAndHeaders[i] = &versionAndHeader{
|
||||
version: v,
|
||||
header: h,
|
||||
}
|
||||
sigSet.Join(set)
|
||||
}
|
||||
verify, err := sigSet.Verify()
|
||||
@@ -285,6 +372,26 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
if !verify {
|
||||
return nil, nil, errors.New("batch block signature verification failed")
|
||||
}
|
||||
|
||||
// blocks have been verified, add them to forkchoice and call the engine
|
||||
for i, b := range blks {
|
||||
s.saveInitSyncBlock(blockRoots[i], b)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err := s.notifyNewPayload(ctx,
|
||||
preVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header,
|
||||
postVersionAndHeaders[i].header, b, blockRoots[i]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for r, st := range boundaries {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return nil, nil, err
|
||||
@@ -306,12 +413,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
// their state summaries and split them off to relative hot/cold storage.
|
||||
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
|
||||
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
b := signed.Block()
|
||||
|
||||
s.saveInitSyncBlock(blockRoot, signed)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: signed.Block().Slot(),
|
||||
Root: blockRoot[:],
|
||||
@@ -327,19 +429,27 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.
|
||||
s.clearInitSyncBlocks()
|
||||
}
|
||||
|
||||
if jCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
if justified == nil {
|
||||
return errNilJustifiedInStore
|
||||
}
|
||||
if jCheckpoint.Epoch > justified.Epoch {
|
||||
if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
|
||||
if fCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
if fCheckpoint.Epoch > finalized.Epoch {
|
||||
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = fCheckpoint
|
||||
s.store.SetPrevFinalizedCheckpt(finalized)
|
||||
s.store.SetFinalizedCheckpt(fCheckpoint)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -418,13 +528,27 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.Be
|
||||
return err
|
||||
}
|
||||
// Feed in block to fork choice store.
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), bytesutil.ToBytes32(blk.Body().Graffiti()),
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not process block for proto array fork choice")
|
||||
|
||||
payloadHash, err := getBlockPayloadHash(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), payloadHash,
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch)
|
||||
}
|
||||
|
||||
func getBlockPayloadHash(blk block.BeaconBlock) ([32]byte, error) {
	payloadHash := [32]byte{}
	if blocks.IsPreBellatrixVersion(blk.Version()) {
		return payloadHash, nil
	}
	payload, err := blk.Body().ExecutionPayload()
	if err != nil {
		return payloadHash, err
	}
	return bytesutil.ToBytes32(payload.BlockHash), nil
}
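Minimal usage sketch (not part of the diff): callers can pass the result straight into fork choice without version checks, since pre-Bellatrix blocks simply yield a zero hash.

	payloadHash, err := getBlockPayloadHash(blk)
	if err != nil {
		return err
	}
	// payloadHash is [32]byte{} for Phase0/Altair blocks and the execution
	// payload's block hash from Bellatrix onward.
	_ = payloadHash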
|
||||
|
||||
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -45,7 +44,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b block.BeaconBlock) (st
|
||||
}
|
||||
|
||||
// Verify block slot time is not from the future.
|
||||
if err := slots.VerifyTime(preState.GenesisTime(), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -92,7 +91,11 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b block.BeaconBlock) er
|
||||
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
|
||||
defer span.End()
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -110,7 +113,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
|
||||
}
|
||||
|
||||
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
|
||||
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
|
||||
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
|
||||
bytesutil.Trunc(fRoot[:]))
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -122,7 +125,11 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
|
||||
// verifyBlkFinalizedSlot validates input block is not less than or equal
|
||||
// to current finalized slot.
|
||||
func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
|
||||
finalizedSlot, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -160,8 +167,8 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
|
||||
if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
jSlot, err := slots.EpochStart(s.justifiedCheckpt.Epoch)
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
jSlot, err := slots.EpochStart(justified.Epoch)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -170,7 +177,7 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !bytes.Equal(b, s.justifiedCheckpt.Root) {
|
||||
if !bytes.Equal(b, justified.Root) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -182,8 +189,12 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
|
||||
defer span.End()
|
||||
|
||||
cpt := state.CurrentJustifiedCheckpoint()
|
||||
if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
|
||||
s.bestJustifiedCheckpt = cpt
|
||||
bestJustified := s.store.BestJustifiedCheckpt()
|
||||
if bestJustified == nil {
|
||||
return errNilBestJustifiedInStore
|
||||
}
|
||||
if cpt.Epoch > bestJustified.Epoch {
|
||||
s.store.SetBestJustifiedCheckpt(cpt)
|
||||
}
|
||||
canUpdate, err := s.shouldUpdateCurrentJustified(ctx, cpt)
|
||||
if err != nil {
|
||||
@@ -191,11 +202,12 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
|
||||
}
|
||||
|
||||
if canUpdate {
|
||||
s.prevJustifiedCheckpt = s.justifiedCheckpt
|
||||
s.justifiedCheckpt = cpt
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
if justified == nil {
|
||||
return errNilJustifiedInStore
|
||||
}
|
||||
s.store.SetPrevJustifiedCheckpt(justified)
|
||||
s.store.SetJustifiedCheckpt(cpt)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -205,13 +217,18 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
|
||||
// caches justified checkpoint balances for fork choice and save justified checkpoint in DB.
|
||||
// This method does not have defense against fork choice bouncing attack, which is why it's only recommend to be used during initial syncing.
|
||||
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
|
||||
s.prevJustifiedCheckpt = s.justifiedCheckpt
|
||||
s.justifiedCheckpt = cp
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
if justified == nil {
|
||||
return errNilJustifiedInStore
|
||||
}
|
||||
s.store.SetPrevJustifiedCheckpt(justified)
|
||||
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
|
||||
return err
|
||||
}
|
||||
s.store.SetJustifiedCheckpt(cp)
|
||||
|
||||
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
|
||||
@@ -230,10 +247,19 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
}
|
||||
|
||||
fRoot := bytesutil.ToBytes32(cp.Root)
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !optimistic {
|
||||
err = s.cfg.BeaconDB.SaveLastValidatedCheckpoint(ctx, cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not migrate to cold")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -310,49 +336,6 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot()), slot)
}

// This updates justified check point in store, if the new justified is later than stored justified or
// the store's justified is not in chain with finalized check point.
//
// Spec definition:
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.BeaconState) error {
// Update justified if it's different than the one cached in the store.
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
}

// Update justified if store justified is not in chain with finalized check point.
finalizedSlot, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
if err != nil {
return err
}
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
anc, err := s.ancestor(ctx, justifiedRoot[:], finalizedSlot)
if err != nil {
return err
}
if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
}
return nil
}
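For readers who do not follow the consensus-spec pseudocode quoted in the removed comment, the sketch below is a rough Go transcription of the same justified-checkpoint update rule over simplified placeholder types. The ancestor lookup is passed in as a stub, and nothing here is Prysm's actual fork choice code.

// Rough Go transcription of the quoted spec rule, using placeholder types.
// ancestorAt is assumed to resolve the ancestor of a root at a given slot.
package main

import "fmt"

type checkpoint struct {
	epoch uint64
	root  [32]byte
}

type forkChoiceStore struct {
	justified checkpoint
	finalized checkpoint
}

func startSlot(epoch uint64) uint64 { return epoch * 32 } // assumes SLOTS_PER_EPOCH = 32

// maybeUpdateJustified applies the "potentially update justified if different
// from store" rule from the spec comment above.
func maybeUpdateJustified(
	store *forkChoiceStore,
	stateJustified checkpoint,
	ancestorAt func(root [32]byte, slot uint64) [32]byte,
) {
	if store.justified == stateJustified {
		return
	}
	// Update justified if the new justified is later than the store's justified.
	if stateJustified.epoch > store.justified.epoch {
		store.justified = stateJustified
		return
	}
	// Update justified if the store's justified is not in chain with the finalized checkpoint.
	finalizedSlot := startSlot(store.finalized.epoch)
	if ancestorAt(store.justified.root, finalizedSlot) != store.finalized.root {
		store.justified = stateJustified
	}
}

func main() {
	st := &forkChoiceStore{justified: checkpoint{epoch: 3}, finalized: checkpoint{epoch: 2}}
	maybeUpdateJustified(st, checkpoint{epoch: 4}, func([32]byte, uint64) [32]byte { return [32]byte{} })
	fmt.Println(st.justified.epoch) // 4
}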
// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
|
||||
// This is useful for block tree visualizer and additional vote accounting.
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.BeaconBlock,
|
||||
@@ -363,7 +346,11 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
|
||||
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
|
||||
slot := blk.Slot()
|
||||
// Fork choice only matters from last finalized slot.
|
||||
fSlot, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
fSlot, err := slots.EpochStart(finalized.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -388,14 +375,17 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
|
||||
for i := len(pendingNodes) - 1; i >= 0; i-- {
|
||||
b := pendingNodes[i]
|
||||
r := pendingRoots[i]
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), bytesutil.ToBytes32(b.Body().Graffiti()),
|
||||
payloadHash, err := getBlockPayloadHash(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), payloadHash,
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not process block for proto array fork choice")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -442,7 +432,7 @@ func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
|
||||
// fork choice justification routine.
|
||||
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
|
||||
if root == params.BeaconConfig().ZeroHash {
|
||||
return s.genesisRoot
|
||||
return s.originBlockRoot
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -41,7 +42,7 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
|
||||
defer span.End()
|
||||
|
||||
if err := s.onAttestation(ctx, att); err != nil {
|
||||
if err := s.OnAttestation(ctx, att); err != nil {
|
||||
return errors.Wrap(err, "could not process attestation")
|
||||
}
|
||||
|
||||
@@ -87,6 +88,9 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
|
||||
}
|
||||
|
||||
f := s.FinalizedCheckpt()
|
||||
if f == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
ss, err := slots.EpochStart(f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -103,38 +107,87 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
|
||||
}
|
||||
|
||||
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
|
||||
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
|
||||
// Wait for state to be initialized.
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
subscribedToStateEvents <- struct{}{}
|
||||
<-stateChannel
|
||||
stateSub.Unsubscribe()
|
||||
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
|
||||
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
stateSub := stateFeed.Subscribe(stateChannel)
|
||||
go func() {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
stateSub.Unsubscribe()
|
||||
return
|
||||
case <-st.C():
|
||||
// Continue when there's no fork choice attestation, there's nothing to process and update head.
|
||||
// This covers the condition when the node is still initial syncing to the head of the chain.
|
||||
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
|
||||
continue
|
||||
case <-stateChannel:
|
||||
stateSub.Unsubscribe()
|
||||
break
|
||||
}
|
||||
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
s.processAttestations(s.ctx)
|
||||
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.Warnf("Resolving fork due to new attestation: %v", err)
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
|
||||
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-st.C():
|
||||
if err := s.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
return
|
||||
}
|
||||
|
||||
// Continue when there's no fork choice attestation, there's nothing to process and update head.
|
||||
// This covers the condition when the node is still initial syncing to the head of the chain.
|
||||
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
|
||||
continue
|
||||
}
|
||||
s.processAttestations(s.ctx)
|
||||
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
if justified == nil {
|
||||
log.WithError(errNilJustifiedInStore).Error("Could not get justified checkpoint")
|
||||
continue
|
||||
}
|
||||
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(justified.Root))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
|
||||
continue
|
||||
}
|
||||
prevHead := s.headRoot()
|
||||
if err := s.updateHead(s.ctx, balances); err != nil {
|
||||
log.WithError(err).Warn("Resolving fork due to new attestation")
|
||||
}
|
||||
s.notifyEngineIfChangedHead(prevHead)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
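spawnProcessAttestationsRoutine now runs entirely in its own goroutine: it waits for the state-initialized event, then ticks once per slot, skips slots with no fork choice attestations, and exits when the service context is cancelled. The sketch below captures that wait-then-tick shape with a plain channel and ticker; the 12 second slot time and the function parameters are placeholders, not Prysm's API.

// Simplified sketch of the attestation-processing loop above: wait for an
// "initialized" signal, then once per slot drain pending work, skipping slots
// with nothing to do, and exit when the service context is cancelled.
package main

import (
	"context"
	"fmt"
	"time"
)

func processLoop(ctx context.Context, initialized <-chan struct{}, pending func() int, process func()) {
	select {
	case <-ctx.Done():
		return
	case <-initialized:
	}
	ticker := time.NewTicker(12 * time.Second) // placeholder slot duration
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if pending() == 0 {
				continue // nothing to process; also covers initial sync
			}
			process()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	init := make(chan struct{}, 1)
	init <- struct{}{}
	go processLoop(ctx, init, func() int { return 1 }, func() { fmt.Println("processed attestations") })
	time.Sleep(100 * time.Millisecond)
	cancel()
}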
|
||||
|
||||
// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(prevHead [32]byte) {
if s.headRoot() == prevHead {
return
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
_, err := s.notifyForkchoiceUpdate(s.ctx,
s.headBlock().Block(),
s.headRoot(),
bytesutil.ToBytes32(finalized.Root),
)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
}
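notifyEngineIfChangedHead only issues a forkchoice-updated call when the head root actually moved since the snapshot taken before fork choice ran. A stripped-down sketch of that compare-and-notify pattern, with illustrative names rather than Prysm's API:

package main

import "fmt"

type headTracker struct {
	head [32]byte
}

// updateHead pretends to run fork choice and possibly move the head.
func (t *headTracker) updateHead(newHead [32]byte) { t.head = newHead }

// notifyIfChanged issues a forkchoice-updated call only when the head root moved,
// so the execution engine is not pinged once per slot with an identical head.
func notifyIfChanged(t *headTracker, prevHead [32]byte, notify func(head [32]byte)) {
	if t.head == prevHead {
		return
	}
	notify(t.head)
}

func main() {
	t := &headTracker{head: [32]byte{1}}
	prev := t.head
	t.updateHead([32]byte{2})
	notifyIfChanged(t, prev, func(h [32]byte) { fmt.Printf("notify engine, new head %x\n", h[0]) })
}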
@@ -9,9 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -42,22 +40,24 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -71,22 +71,24 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -101,18 +103,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
func TestProcessAttestations_Ok(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
opts = append(opts, WithAttestationPool(attestations.NewPool()))
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
service.cfg = cfg
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
@@ -123,9 +119,45 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
service.processAttestations(ctx)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
|
||||
}
|
||||
|
||||
func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
opts := testServiceOptsWithDB(t)
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.notifyEngineIfChangedHead(service.headRoot())
|
||||
hookErr := "could not notify forkchoice update"
|
||||
finalizedErr := "could not get finalized checkpoint"
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
service.notifyEngineIfChangedHead([32]byte{'a'})
|
||||
require.LogsContain(t, hook, finalizedErr)
|
||||
|
||||
hook.Reset()
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
|
||||
service.head = &head{
|
||||
slot: 1,
|
||||
root: r,
|
||||
block: wsb,
|
||||
}
|
||||
service.store.SetFinalizedCheckpt(finalized)
|
||||
service.notifyEngineIfChangedHead([32]byte{'b'})
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
}
|
||||
|
||||
@@ -53,14 +53,20 @@ func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBloc
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
|
||||
// Log block sync status.
|
||||
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, s.finalizedCheckpt, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
|
||||
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
|
||||
return err
|
||||
}
|
||||
// Log state transition data.
|
||||
logStateTransitionData(blockCopy.Block())
|
||||
if err := logStateTransitionData(blockCopy.Block()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -82,6 +88,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
|
||||
|
||||
for i, b := range blocks {
|
||||
blockCopy := b.Copy()
|
||||
// TODO(10261) check optimistic status
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -98,13 +105,21 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
}
|
||||
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
|
||||
// log.Fatalf will prevent defer from being called
|
||||
span.End()
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
@@ -148,8 +163,12 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
|
||||
currentEpoch := slots.ToEpoch(s.CurrentSlot())
|
||||
// Prevent `sinceFinality` going underflow.
|
||||
var sinceFinality types.Epoch
|
||||
if currentEpoch > s.finalizedCheckpt.Epoch {
|
||||
sinceFinality = currentEpoch - s.finalizedCheckpt.Epoch
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
if currentEpoch > finalized.Epoch {
|
||||
sinceFinality = currentEpoch - finalized.Epoch
|
||||
}
|
||||
|
||||
if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
|
||||
|
||||
@@ -33,6 +33,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
}
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
|
||||
params.OverrideBeaconConfig(bc)
|
||||
@@ -124,30 +125,28 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithFinalizedStateAtStartUp(genesis),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
|
||||
require.NoError(t, err)
|
||||
err = s.ReceiveBlock(ctx, wsb, root)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
@@ -166,33 +165,31 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b), root))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wsb, root))
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
@@ -200,7 +197,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
// Verify fork choice has processed the block. (Genesis block and the new block)
|
||||
assert.Equal(t, 2, len(s.cfg.ForkChoiceStore.Nodes()))
|
||||
assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
@@ -250,19 +247,14 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
@@ -270,10 +262,12 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
blks := []block.SignedBeaconBlock{wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block)}
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
|
||||
require.NoError(t, err)
|
||||
blks := []block.SignedBeaconBlock{wsb}
|
||||
roots := [][32]byte{root}
|
||||
err = s.ReceiveBlockBatch(ctx, blks, roots)
|
||||
if tt.wantedErr != "" {
|
||||
@@ -287,40 +281,41 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := NewService(context.Background())
|
||||
opts := testServiceOptsNoDB()
|
||||
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
t.Error("Should not have block")
|
||||
}
|
||||
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
s.saveInitSyncBlock(r, wsb)
|
||||
if !s.HasInitSyncBlock(r) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background())
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateGen: stategen.New(beaconDB)}
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background())
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateGen: stategen.New(beaconDB)}
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -329,12 +324,11 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background())
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateGen: stategen.New(beaconDB)}
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 10000000})
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
@@ -12,6 +13,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
@@ -20,19 +22,23 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -45,26 +51,22 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
genesisRoot [32]byte
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
// originBlockRoot is the genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
originBlockRoot [32]byte
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
}

// config options for the service.
@@ -84,7 +86,9 @@ type config struct {
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher powchain.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller enginev1.Caller
}
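Besides moving the checkpoints into the store package, the Service struct swaps the raw justifiedBalances slice and its lock for a *stateBalanceCache. A rough sketch of what such a root-keyed balance cache can look like follows; this is hypothetical code, not the actual stateBalanceCache implementation.

package main

import (
	"fmt"
	"sync"
)

// balanceCache sketches the idea behind replacing `justifiedBalances []uint64`
// plus a lock with a dedicated cache type: balances are looked up by the
// justified root and recomputed only when that root changes.
type balanceCache struct {
	sync.Mutex
	root     [32]byte
	balances []uint64
	fetch    func(root [32]byte) ([]uint64, error) // e.g. read validator balances from state
}

func (c *balanceCache) get(root [32]byte) ([]uint64, error) {
	c.Lock()
	defer c.Unlock()
	if root == c.root && c.balances != nil {
		return c.balances, nil // cache hit: same justified root as last time
	}
	b, err := c.fetch(root)
	if err != nil {
		return nil, err
	}
	c.root, c.balances = root, b
	return b, nil
}

func main() {
	calls := 0
	c := &balanceCache{fetch: func([32]byte) ([]uint64, error) { calls++; return []uint64{32e9, 32e9}, nil }}
	_, _ = c.get([32]byte{1})
	_, _ = c.get([32]byte{1}) // served from cache
	fmt.Println(calls)        // 1
}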
// NewService instantiates a new block service instance that will
|
||||
@@ -97,8 +101,8 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
|
||||
justifiedBalances: make([]uint64, 0),
|
||||
cfg: &config{},
|
||||
store: &store.Store{},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(srv); err != nil {
|
||||
@@ -106,6 +110,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if srv.justifiedBalances == nil {
|
||||
srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -115,182 +125,18 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
|
||||
// Start a blockchain service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
beaconState := s.cfg.FinalizedStateAtStartUp
|
||||
saved := s.cfg.FinalizedStateAtStartUp
|
||||
|
||||
// Make sure that attestation processor is subscribed and ready for state initializing event.
|
||||
attestationProcessorSubscribed := make(chan struct{}, 1)
|
||||
|
||||
// If the chain has already been initialized, simply start the block processing routine.
|
||||
if beaconState != nil && !beaconState.IsNil() {
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
|
||||
s.cfg.AttService.SetGenesisTime(beaconState.GenesisTime())
|
||||
if err := s.initializeChainInfo(s.ctx); err != nil {
|
||||
log.Fatalf("Could not set up chain info: %v", err)
|
||||
if saved != nil && !saved.IsNil() {
|
||||
if err := s.StartFromSavedState(saved); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// We start a counter to genesis, if needed.
|
||||
gState, err := s.cfg.BeaconDB.GenesisState(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not retrieve genesis state: %v", err)
|
||||
}
|
||||
gRoot, err := gState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slots.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
|
||||
justifiedCheckpoint, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get justified checkpoint: %v", err)
|
||||
}
|
||||
finalizedCheckpoint, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get finalized checkpoint: %v", err)
|
||||
}
|
||||
|
||||
// Resume fork choice.
|
||||
s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
|
||||
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
|
||||
log.Fatalf("Could not cache justified state balances: %v", err)
|
||||
}
|
||||
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
|
||||
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
|
||||
s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
|
||||
s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
|
||||
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
|
||||
|
||||
ss, err := slots.EpochStart(s.finalizedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
|
||||
}
|
||||
h := s.headBlock().Block()
|
||||
if h.Slot() > ss {
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": ss,
|
||||
"endSlot": h.Slot(),
|
||||
}).Info("Loading blocks to fork choice store, this may take a while.")
|
||||
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
|
||||
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// not attempting to save initial sync blocks here, because there shouldn't be until
|
||||
// after the statefeed.Initialized event is fired (below)
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, s.finalizedCheckpt.Epoch); err != nil {
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
log.Fatalf("could not verify initial checkpoint provided for chain sync, with err=: %v", err)
|
||||
}
|
||||
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: s.genesisTime,
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorRoot(),
|
||||
},
|
||||
})
|
||||
} else {
|
||||
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
|
||||
if s.cfg.ChainStartFetcher == nil {
|
||||
log.Fatal("Not configured web3Service for POW chain")
|
||||
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
|
||||
if err := s.startFromPOWChain(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
go func() {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
<-attestationProcessorSubscribed
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.ChainStarted {
|
||||
data, ok := event.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
log.Error("event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
}
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
|
||||
s.processChainStartTime(s.ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
go s.processAttestationsRoutine(attestationProcessorSubscribed)
|
||||
}
|
||||
|
||||
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
|
||||
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
|
||||
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
|
||||
if err != nil {
|
||||
log.Fatalf("Could not initialize beacon chain: %v", err)
|
||||
}
|
||||
// We start a counter to genesis, if needed.
|
||||
gRoot, err := initializedState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
|
||||
|
||||
// We send out a state initialized event to the rest of the services
|
||||
// running in the beacon node.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: genesisTime,
|
||||
GenesisValidatorsRoot: initializedState.GenesisValidatorRoot(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
// based on a genesis timestamp value obtained from the ChainStart event emitted
|
||||
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
|
||||
func (s *Service) initializeBeaconChain(
|
||||
ctx context.Context,
|
||||
genesisTime time.Time,
|
||||
preGenesisState state.BeaconState,
|
||||
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
s.genesisTime = genesisTime
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
|
||||
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not initialize genesis state")
|
||||
}
|
||||
|
||||
if err := s.saveGenesisData(ctx, genesisState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
|
||||
log.Info("Initialized beacon chain genesis state")
|
||||
|
||||
// Clear out all pre-genesis data now that the state is initialized.
|
||||
s.cfg.ChainStartFetcher.ClearPreGenesisData()
|
||||
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())
|
||||
|
||||
return genesisState, nil
|
||||
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
|
||||
}
|
||||
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
@@ -310,7 +156,7 @@ func (s *Service) Stop() error {
|
||||
// Status always returns nil unless there is an error condition that causes
|
||||
// this service to be unhealthy.
|
||||
func (s *Service) Status() error {
|
||||
if s.genesisRoot == params.BeaconConfig().ZeroHash {
|
||||
if s.originBlockRoot == params.BeaconConfig().ZeroHash {
|
||||
return errors.New("genesis state has not been created")
|
||||
}
|
||||
if runtime.NumGoroutine() > s.cfg.MaxRoutines {
|
||||
@@ -319,64 +165,126 @@ func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
|
||||
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
|
||||
return fmt.Errorf("could not load genesis block: %v", err)
|
||||
}
|
||||
genesisBlkRoot, err := genesisBlk.Block().HashTreeRoot()
|
||||
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
|
||||
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
|
||||
|
||||
originRoot, err := s.originRootFromSavedState(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
|
||||
|
||||
// Finalized checkpoint at genesis is a zero hash.
|
||||
genesisCheckpoint := genesisState.FinalizedCheckpoint()
|
||||
|
||||
s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
|
||||
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
|
||||
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
|
||||
s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
|
||||
s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
|
||||
s.originBlockRoot = originRoot
|
||||
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
genesisBlk.Block().Slot(),
|
||||
genesisBlkRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
[32]byte{},
|
||||
genesisCheckpoint.Epoch,
|
||||
genesisCheckpoint.Epoch); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
if err := s.initializeHeadFromDB(s.ctx); err != nil {
|
||||
return errors.Wrap(err, "could not set up chain info")
|
||||
}
|
||||
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
|
||||
|
||||
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get justified checkpoint")
|
||||
}
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
s.store = store.New(justified, finalized)
|
||||
|
||||
var store f.ForkChoicer
|
||||
fRoot := bytesutil.ToBytes32(finalized.Root)
|
||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
||||
store = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
|
||||
} else {
|
||||
store = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
|
||||
}
|
||||
s.cfg.ForkChoiceStore = store
|
||||
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
if fb == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
payloadHash, err := getBlockPayloadHash(fb.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload hash")
|
||||
}
|
||||
fSlot := fb.Block().Slot()
|
||||
if err := store.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
|
||||
payloadHash, justified.Epoch, finalized.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
|
||||
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
|
||||
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get last validated checkpoint")
|
||||
}
|
||||
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
|
||||
if err := store.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
|
||||
h := s.headBlock().Block()
|
||||
if h.Slot() > fSlot {
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": fSlot,
|
||||
"endSlot": h.Slot(),
|
||||
}).Info("Loading blocks to fork choice store, this may take a while.")
|
||||
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, finalized, justified); err != nil {
|
||||
return errors.Wrap(err, "could not fill in fork choice store missing blocks")
|
||||
}
|
||||
}
|
||||
|
||||
// not attempting to save initial sync blocks here, because there shouldn't be any until
|
||||
// after the statefeed.Initialized event is fired (below)
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
|
||||
}
|
||||
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: s.genesisTime,
|
||||
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
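StartFromSavedState seeds fork choice by inserting the finalized block as an optimistic node and only marks it fully validated when it matches the last validated checkpoint recorded in the DB. A condensed sketch of that decision using placeholder types (the real code goes through ForkChoiceStore.InsertOptimisticBlock, SetOptimisticToValid, and BeaconDB.LastValidatedCheckpoint):

package main

import (
	"bytes"
	"fmt"
)

type checkpoint struct {
	epoch uint64
	root  []byte
}

// forkChoice is a stand-in for the fork choice store: blocks are inserted
// optimistically and can later be marked as fully validated.
type forkChoice struct {
	optimistic map[string]bool
}

func (f *forkChoice) insertOptimistic(root []byte)  { f.optimistic[string(root)] = true }
func (f *forkChoice) setValidated(root []byte)      { f.optimistic[string(root)] = false }
func (f *forkChoice) isOptimistic(root []byte) bool { return f.optimistic[string(root)] }

// resume mirrors the startup logic: insert the finalized block optimistically,
// then mark it valid only if it equals the last checkpoint the engine validated.
func resume(fc *forkChoice, finalized, lastValidated checkpoint) {
	fc.insertOptimistic(finalized.root)
	if bytes.Equal(finalized.root, lastValidated.root) {
		fc.setValidated(finalized.root)
	}
}

func main() {
	fc := &forkChoice{optimistic: map[string]bool{}}
	fin := checkpoint{epoch: 10, root: []byte{0x01}}
	resume(fc, fin, checkpoint{epoch: 10, root: []byte{0x01}})
	fmt.Println(fc.isOptimistic(fin.root)) // false: finalized block treated as validated
}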
|
||||
|
||||
// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
|
||||
func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error) {
|
||||
// first check if we have started from checkpoint sync and have a root
|
||||
originRoot, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
|
||||
if err == nil {
|
||||
return originRoot, nil
|
||||
}
|
||||
if !errors.Is(err, db.ErrNotFound) {
|
||||
return originRoot, errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
|
||||
}
|
||||
|
||||
// we got here because OriginCheckpointBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
|
||||
// so we should have a value for GenesisBlock
|
||||
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block from db")
|
||||
return originRoot, errors.Wrap(err, "could not get genesis block from db")
|
||||
}
|
||||
if err := helpers.BeaconBlockIsNil(genesisBlock); err != nil {
|
||||
return err
|
||||
return originRoot, err
|
||||
}
|
||||
genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root of genesis block")
|
||||
return genesisBlkRoot, errors.Wrap(err, "could not get signing root of genesis block")
|
||||
}
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
return genesisBlkRoot, nil
|
||||
}
|
||||
|
||||
// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head
|
||||
// note that this may block until stategen replays blocks between the finalized and head blocks
|
||||
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long
|
||||
func (s *Service) initializeHeadFromDB(ctx context.Context) error {
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint from db")
|
||||
@@ -443,11 +351,147 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is called when a client starts from non-genesis slot. This passes last justified and finalized
|
||||
// information to fork choice service to initializes fork choice store.
|
||||
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
|
||||
s.cfg.ForkChoiceStore = store
|
||||
func (s *Service) startFromPOWChain() error {
|
||||
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
|
||||
if s.cfg.ChainStartFetcher == nil {
|
||||
return errors.New("not configured web3Service for POW chain")
|
||||
}
|
||||
go func() {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.ChainStarted {
|
||||
data, ok := event.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
log.Error("event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
}
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
|
||||
s.onPowchainStart(s.ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// onPowchainStart initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
|
||||
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
|
||||
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
|
||||
if err != nil {
|
||||
log.Fatalf("Could not initialize beacon chain: %v", err)
|
||||
}
|
||||
// We start a counter to genesis, if needed.
|
||||
gRoot, err := initializedState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
|
||||
|
||||
// We send out a state initialized event to the rest of the services
|
||||
// running in the beacon node.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: genesisTime,
|
||||
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
// based on a genesis timestamp value obtained from the ChainStart event emitted
|
||||
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
|
||||
func (s *Service) initializeBeaconChain(
|
||||
ctx context.Context,
|
||||
genesisTime time.Time,
|
||||
preGenesisState state.BeaconState,
|
||||
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
s.genesisTime = genesisTime
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
|
||||
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not initialize genesis state")
|
||||
}
|
||||
|
||||
if err := s.saveGenesisData(ctx, genesisState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
|
||||
log.Info("Initialized beacon chain genesis state")
|
||||
|
||||
// Clear out all pre-genesis data now that the state is initialized.
|
||||
s.cfg.ChainStartFetcher.ClearPreGenesisData()
|
||||
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())
|
||||
|
||||
return genesisState, nil
|
||||
}
|
||||
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
|
||||
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
|
||||
return fmt.Errorf("could not load genesis block: %v", err)
|
||||
}
|
||||
genesisBlkRoot, err := genesisBlk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
|
||||
s.originBlockRoot = genesisBlkRoot
|
||||
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
|
||||
|
||||
// Finalized checkpoint at genesis is a zero hash.
|
||||
genesisCheckpoint := genesisState.FinalizedCheckpoint()
|
||||
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
|
||||
|
||||
payloadHash, err := getBlockPayloadHash(genesisBlk.Block())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
genesisBlk.Block().Slot(),
|
||||
genesisBlkRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
payloadHash,
|
||||
genesisCheckpoint.Epoch,
|
||||
genesisCheckpoint.Epoch); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
|
||||
}
|
||||
|
||||
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This returns true if the block has been processed before. There are two ways to verify that the block has been processed:
|
||||
@@ -461,3 +505,20 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
|
||||
|
||||
return s.cfg.BeaconDB.HasBlock(ctx, root)
|
||||
}
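The body of hasBlock is mostly elided by this hunk; below is a minimal sketch of the two-way check described in the comment above, assuming fork choice is consulted before falling back to the database (HasNode is the same accessor used by the benchmarks later in this file; hasBlockSketch is a hypothetical name, not the actual implementation):

func (s *Service) hasBlockSketch(ctx context.Context, root [32]byte) bool {
	// First way: the block is already a node in the fork choice store.
	if s.cfg.ForkChoiceStore.HasNode(root) {
		return true
	}
	// Second way: the block has been persisted to the beacon DB.
	return s.cfg.BeaconDB.HasBlock(ctx, root)
}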
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
currentTime := prysmTime.Now()
|
||||
if currentTime.After(genesisTime) {
|
||||
return
|
||||
}
|
||||
|
||||
gState, err := db.GenesisState(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not retrieve genesis state: %v", err)
|
||||
}
|
||||
gRoot, err := gState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
@@ -17,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
@@ -108,25 +111,24 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &config{
|
||||
BeaconBlockBuf: 0,
|
||||
BeaconDB: beaconDB,
|
||||
DepositCache: depositCache,
|
||||
ChainStartFetcher: web3Service,
|
||||
P2p: &mockBroadcaster{},
|
||||
StateNotifier: &mockBeaconNode{},
|
||||
AttPool: attestations.NewPool(),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
|
||||
AttService: attService,
|
||||
stateGen := stategen.New(beaconDB)
|
||||
// Save a state in stategen for the purposes of testing a service stop / shutdown.
|
||||
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithDepositCache(depositCache),
|
||||
WithChainStartFetcher(web3Service),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
|
||||
WithAttestationService(attService),
|
||||
WithStateGen(stateGen),
|
||||
}
|
||||
|
||||
// Save a state in stategen for the purposes of testing a service stop / shutdown.
|
||||
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
|
||||
|
||||
chainService, err := NewService(ctx)
|
||||
chainService, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err, "Unable to setup chain service")
|
||||
chainService.cfg = cfg
|
||||
chainService.genesisTime = time.Unix(1, 0) // non-zero time
|
||||
|
||||
return chainService
|
||||
@@ -142,7 +144,9 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
@@ -151,6 +155,11 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
ss := &ethpb.StateSummary{
|
||||
Slot: 1,
|
||||
Root: blkRoot[:],
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
@@ -172,12 +181,16 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
@@ -241,7 +254,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(0))
|
||||
@@ -253,8 +268,8 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
require.DeepEqual(t, blkRoot[:], chainService.finalizedCheckpt.Root, "Finalize Checkpoint root is incorrect")
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], chainService.justifiedCheckpt.Root, "Justified Checkpoint root is incorrect")
|
||||
require.DeepEqual(t, blkRoot[:], chainService.store.FinalizedCheckpt().Root, "Finalize Checkpoint root is incorrect")
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], chainService.store.JustifiedCheckpt().Root, "Justified Checkpoint root is incorrect")
|
||||
|
||||
require.NoError(t, chainService.Stop(), "Unable to stop chain service")
|
||||
|
||||
@@ -268,7 +283,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
@@ -277,16 +294,22 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
c.cfg.FinalizedStateAtStartUp = headState
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
|
||||
require.NoError(t, err)
|
||||
stateGen := stategen.New(beaconDB)
|
||||
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, c.StartFromSavedState(headState))
|
||||
headBlk, err := c.HeadBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
|
||||
@@ -299,7 +322,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
if !bytes.Equal(headRoot[:], r) {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
|
||||
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
|
||||
}
|
||||
|
||||
func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
@@ -310,7 +333,9 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
@@ -319,19 +344,32 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
|
||||
require.NoError(t, err)
|
||||
ss := &ethpb.StateSummary{
|
||||
Slot: finalizedSlot,
|
||||
Root: headRoot[:],
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
|
||||
stateGen := stategen.New(beaconDB)
|
||||
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, c.StartFromSavedState(headState))
|
||||
s, err := c.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
|
||||
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
|
||||
assert.DeepEqual(t, genesis, c.head.block.Proto())
|
||||
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
|
||||
assert.DeepEqual(t, headBlock, c.head.block.Proto())
|
||||
}
|
||||
|
||||
func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
@@ -352,14 +390,18 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
finalizedBlock := util.NewBeaconBlock()
|
||||
finalizedBlock.Block.Slot = finalizedSlot
|
||||
finalizedBlock.Block.ParentRoot = genesisRoot[:]
|
||||
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(finalizedBlock)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
// Set head slot close to the finalization point, no head sync is triggered.
|
||||
headBlock := util.NewBeaconBlock()
|
||||
@@ -367,28 +409,32 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
headBlock.Block.ParentRoot = finalizedRoot[:]
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
|
||||
Epoch: slots.ToEpoch(finalizedBlock.Block.Slot),
|
||||
Root: finalizedRoot[:],
|
||||
}))
|
||||
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
|
||||
c.cfg.FinalizedStateAtStartUp = headState
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
|
||||
require.NoError(t, err)
|
||||
stateGen := stategen.New(beaconDB)
|
||||
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.StartFromSavedState(headState))
|
||||
s, err := c.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
|
||||
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
|
||||
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
|
||||
// Since head sync is not triggered, the chain is initialized to the last finalized checkpoint.
|
||||
assert.DeepEqual(t, finalizedBlock, c.head.block.Proto())
|
||||
assert.LogsContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
|
||||
@@ -400,16 +446,18 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
headBlock.Block.ParentRoot = finalizedRoot[:]
|
||||
headRoot, err = headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
|
||||
|
||||
hook.Reset()
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
require.NoError(t, c.initializeHeadFromDB(ctx))
|
||||
s, err = c.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
|
||||
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
|
||||
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
|
||||
// Head slot is far beyond the latest finalized checkpoint, head sync is triggered.
|
||||
assert.DeepEqual(t, headBlock, c.head.block.Proto())
|
||||
assert.LogsContain(t, hook, "Regenerating state from the last checkpoint at slot 225")
|
||||
@@ -429,7 +477,9 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
newState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
|
||||
require.NoError(t, s.saveHeadNoDB(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r, newState))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
|
||||
|
||||
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -438,19 +488,43 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
|
||||
func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
block := util.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
}
|
||||
|
||||
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
block := util.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
@@ -468,7 +542,9 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
block := util.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(block))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
s.saveInitSyncBlock(r, wsb)
|
||||
require.NoError(t, s.Stop())
|
||||
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
|
||||
}
|
||||
@@ -479,7 +555,7 @@ func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
service.processChainStartTime(context.Background(), time.Now())
|
||||
service.onPowchainStart(context.Background(), time.Now())
|
||||
|
||||
stateEvent := <-stateChannel
|
||||
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
|
||||
@@ -494,7 +570,9 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
block := util.NewBeaconBlock()
|
||||
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -504,20 +582,46 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
|
||||
func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := v1.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
|
||||
}
|
||||
}
|
||||
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := v1.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
83
beacon-chain/blockchain/state_balance_cache.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
)
|
||||
|
||||
var errNilStateFromStategen = errors.New("justified state can't be nil")
|
||||
|
||||
type stateBalanceCache struct {
|
||||
sync.Mutex
|
||||
balances []uint64
|
||||
root [32]byte
|
||||
stateGen stateByRooter
|
||||
}
|
||||
|
||||
type stateByRooter interface {
|
||||
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
|
||||
// to avoid nil pointer bugs when updating the cache in the read path (get())
|
||||
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
|
||||
if sg == nil {
|
||||
return nil, errors.New("can't initialize state balance cache without stategen")
|
||||
}
|
||||
return &stateBalanceCache{stateGen: sg}, nil
|
||||
}
|
||||
|
||||
// update is called by get() when the requested root doesn't match
|
||||
// the previously read value. This cache assumes we only want to cache one
|
||||
// set of balances for a single root (the current justified root).
|
||||
//
|
||||
// warning: this is not thread-safe on its own, relies on get() for locking
|
||||
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
|
||||
stateBalanceCacheMiss.Inc()
|
||||
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if justifiedState == nil || justifiedState.IsNil() {
|
||||
return nil, errNilStateFromStategen
|
||||
}
|
||||
epoch := time.CurrentEpoch(justifiedState)
|
||||
|
||||
justifiedBalances := make([]uint64, justifiedState.NumValidators())
|
||||
var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
|
||||
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
|
||||
justifiedBalances[idx] = val.EffectiveBalance()
|
||||
} else {
|
||||
justifiedBalances[idx] = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.balances = justifiedBalances
|
||||
c.root = justifiedRoot
|
||||
return c.balances, nil
|
||||
}
|
||||
|
||||
// getBalances takes an explicit justifiedRoot so it can invalidate the singleton cache key
|
||||
// when the justified root changes, and takes a context so that the long-running stategen
|
||||
// read path can connect to the upstream cancellation/timeout chain.
|
||||
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if justifiedRoot != [32]byte{} && justifiedRoot == c.root {
|
||||
stateBalanceCacheHit.Inc()
|
||||
return c.balances, nil
|
||||
}
|
||||
|
||||
return c.update(ctx, justifiedRoot)
|
||||
}
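A minimal usage sketch for this cache (justifiedBalancesSketch is hypothetical and not part of this file), assuming a *stategen.State named sg and the current justified root:

func justifiedBalancesSketch(ctx context.Context, sg *stategen.State, justifiedRoot [32]byte) ([]uint64, error) {
	cache, err := newStateBalanceCache(sg)
	if err != nil {
		return nil, err
	}
	// Repeated calls with the same non-zero root hit the cache and skip the stategen replay.
	return cache.get(ctx, justifiedRoot)
}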
|
||||
236
beacon-chain/blockchain/state_balance_cache_test.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
type mockStateByRooter struct {
|
||||
state state.BeaconState
|
||||
err error
|
||||
}
|
||||
|
||||
func (m *mockStateByRooter) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
|
||||
return m.state, m.err
|
||||
}
|
||||
|
||||
type testStateOpt func(*ethpb.BeaconStateAltair)
|
||||
|
||||
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
|
||||
return func(a *ethpb.BeaconStateAltair) {
|
||||
a.Validators = v
|
||||
}
|
||||
}
|
||||
|
||||
func testStateWithSlot(slot types.Slot) testStateOpt {
|
||||
return func(a *ethpb.BeaconStateAltair) {
|
||||
a.Slot = slot
|
||||
}
|
||||
}
|
||||
|
||||
func testStateFixture(opts ...testStateOpt) state.BeaconState {
|
||||
a := &ethpb.BeaconStateAltair{}
|
||||
for _, o := range opts {
|
||||
o(a)
|
||||
}
|
||||
s, _ := v2.InitializeFromProtoUnsafe(a)
|
||||
return s
|
||||
}
|
||||
|
||||
func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
|
||||
vs := make([]*ethpb.Validator, count)
|
||||
var i uint32 = 0
|
||||
for ; i < uint32(count); i++ {
|
||||
pk := make([]byte, 48)
|
||||
binary.LittleEndian.PutUint32(pk, i)
|
||||
v := &ethpb.Validator{PublicKey: pk}
|
||||
for _, o := range opts {
|
||||
o(v)
|
||||
}
|
||||
vs[i] = v
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
|
||||
return func(v *ethpb.Validator) {
|
||||
pki := binary.LittleEndian.Uint64(v.PublicKey)
|
||||
if pki%2 == 0 {
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
} else {
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
|
||||
return func(v *ethpb.Validator) {
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
pki := binary.LittleEndian.Uint64(v.PublicKey)
|
||||
if pki%2 == 0 {
|
||||
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
|
||||
} else {
|
||||
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
|
||||
return func(v *ethpb.Validator) {
|
||||
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
|
||||
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
func balanceIsKeyTimes2(v *ethpb.Validator) {
|
||||
pki := binary.LittleEndian.Uint64(v.PublicKey)
|
||||
v.EffectiveBalance = uint64(pki) * 2
|
||||
}
|
||||
|
||||
func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
|
||||
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
|
||||
return generateTestValidators(10,
|
||||
oddValidatorsExpired(types.Slot(99)),
|
||||
balanceIsKeyTimes2), balances
|
||||
}
|
||||
|
||||
func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
|
||||
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
|
||||
return generateTestValidators(10,
|
||||
oddValidatorsQueued(types.Slot(99)),
|
||||
balanceIsKeyTimes2), balances
|
||||
}
|
||||
|
||||
func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
|
||||
balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
|
||||
return generateTestValidators(10,
|
||||
allValidatorsValid(types.Slot(99)),
|
||||
balanceIsKeyTimes2), balances
|
||||
}
|
||||
|
||||
func TestStateBalanceCache(t *testing.T) {
|
||||
type sbcTestCase struct {
|
||||
err error
|
||||
root [32]byte
|
||||
sbc *stateBalanceCache
|
||||
balances []uint64
|
||||
name string
|
||||
}
|
||||
sentinelCacheMiss := errors.New("Cache missed, as expected!")
|
||||
sentinelBalances := []uint64{1, 2, 3, 4, 5}
|
||||
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
|
||||
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
|
||||
allValidValidators, allValidBalances := testAllValidValidators()
|
||||
cases := []sbcTestCase{
|
||||
{
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
balances: sentinelBalances,
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
err: sentinelCacheMiss,
|
||||
},
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
balances: sentinelBalances,
|
||||
},
|
||||
name: "cache hit",
|
||||
},
|
||||
// this works by using a stateByRooter that returns a known error
|
||||
// so really we're testing the miss by making sure stategen got called
|
||||
// this also tells us stategen errors are propagated
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
err: sentinelCacheMiss,
|
||||
},
|
||||
root: bytesutil.ToBytes32([]byte{'B'}),
|
||||
},
|
||||
err: sentinelCacheMiss,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "cache miss",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{},
|
||||
root: bytesutil.ToBytes32([]byte{'B'}),
|
||||
},
|
||||
err: errNilStateFromStategen,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "error for nil state upon cache miss",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(halfExpiredValidators)),
|
||||
},
|
||||
},
|
||||
balances: halfExpiredBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "test filtering by exit epoch",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(halfQueuedValidators)),
|
||||
},
|
||||
},
|
||||
balances: halfQueuedBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "test filtering by activation epoch",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(allValidValidators)),
|
||||
},
|
||||
},
|
||||
balances: allValidBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "happy path",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(allValidValidators)),
|
||||
},
|
||||
},
|
||||
balances: allValidBalances,
|
||||
root: [32]byte{},
|
||||
name: "zero root",
|
||||
},
|
||||
}
|
||||
ctx := context.Background()
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
cache := c.sbc
|
||||
cacheRootStart := cache.root
|
||||
b, err := cache.get(ctx, c.root)
|
||||
require.ErrorIs(t, err, c.err)
|
||||
require.DeepEqual(t, c.balances, b)
|
||||
if c.err != nil {
|
||||
// if there was an error somewhere, the root should not have changed (unless it already matched)
|
||||
require.Equal(t, cacheRootStart, cache.root)
|
||||
} else {
|
||||
// when successful, the cache should always end with a root matching the request
|
||||
require.Equal(t, c.root, cache.root)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
27
beacon-chain/blockchain/store/BUILD.bazel
Normal file
@@ -0,0 +1,27 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"new.go",
|
||||
"setter_getter.go",
|
||||
"type.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = ["//proto/prysm/v1alpha1:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"new_test.go",
|
||||
"setter_getter_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
4
beacon-chain/blockchain/store/doc.go
Normal file
@@ -0,0 +1,4 @@
|
||||
// Package store implements the store object defined in the phase0 fork choice spec.
|
||||
// It serves as a helpful middleware layer in between blockchain pkg and fork choice protoarray pkg.
|
||||
// All the getters and setters are safe for concurrent use.
|
||||
package store
|
||||
15
beacon-chain/blockchain/store/new.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func New(justifiedCheckpt *ethpb.Checkpoint, finalizedCheckpt *ethpb.Checkpoint) *Store {
|
||||
return &Store{
|
||||
justifiedCheckpt: justifiedCheckpt,
|
||||
prevJustifiedCheckpt: justifiedCheckpt,
|
||||
bestJustifiedCheckpt: justifiedCheckpt,
|
||||
finalizedCheckpt: finalizedCheckpt,
|
||||
prevFinalizedCheckpt: finalizedCheckpt,
|
||||
}
|
||||
}
|
||||
25
beacon-chain/blockchain/store/new_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
j := &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: []byte("hi"),
|
||||
}
|
||||
f := &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: []byte("hello"),
|
||||
}
|
||||
s := New(j, f)
|
||||
require.DeepSSZEqual(t, s.JustifiedCheckpt(), j)
|
||||
require.DeepSSZEqual(t, s.BestJustifiedCheckpt(), j)
|
||||
require.DeepSSZEqual(t, s.PrevJustifiedCheckpt(), j)
|
||||
require.DeepSSZEqual(t, s.FinalizedCheckpt(), f)
|
||||
require.DeepSSZEqual(t, s.PrevFinalizedCheckpt(), f)
|
||||
}
|
||||
73
beacon-chain/blockchain/store/setter_getter.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package store
|
||||
|
||||
import ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
|
||||
// PrevJustifiedCheckpt returns the previous justified checkpoint in the Store.
|
||||
func (s *Store) PrevJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.prevJustifiedCheckpt
|
||||
}
|
||||
|
||||
// BestJustifiedCheckpt returns the best justified checkpoint in the Store.
|
||||
func (s *Store) BestJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.bestJustifiedCheckpt
|
||||
}
|
||||
|
||||
// JustifiedCheckpt returns the justified checkpoint in the Store.
|
||||
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.justifiedCheckpt
|
||||
}
|
||||
|
||||
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
|
||||
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.prevFinalizedCheckpt
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the finalized checkpoint in the Store.
|
||||
func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.finalizedCheckpt
|
||||
}
|
||||
|
||||
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
|
||||
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.prevJustifiedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetBestJustifiedCheckpt sets the best justified checkpoint in the Store.
|
||||
func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.bestJustifiedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetJustifiedCheckpt sets the justified checkpoint in the Store.
|
||||
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.justifiedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
|
||||
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.finalizedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.
|
||||
func (s *Store) SetPrevFinalizedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.prevFinalizedCheckpt = cp
|
||||
}
|
||||
53
beacon-chain/blockchain/store/setter_getter_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func Test_store_PrevJustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.PrevJustifiedCheckpt())
|
||||
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetPrevJustifiedCheckpt(cp)
|
||||
require.Equal(t, cp, s.PrevJustifiedCheckpt())
|
||||
}
|
||||
|
||||
func Test_store_BestJustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.BestJustifiedCheckpt())
|
||||
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetBestJustifiedCheckpt(cp)
|
||||
require.Equal(t, cp, s.BestJustifiedCheckpt())
|
||||
}
|
||||
|
||||
func Test_store_JustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.JustifiedCheckpt())
|
||||
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetJustifiedCheckpt(cp)
|
||||
require.Equal(t, cp, s.JustifiedCheckpt())
|
||||
}
|
||||
|
||||
func Test_store_FinalizedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.FinalizedCheckpt())
|
||||
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetFinalizedCheckpt(cp)
|
||||
require.Equal(t, cp, s.FinalizedCheckpt())
|
||||
}
|
||||
|
||||
func Test_store_PrevFinalizedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.PrevFinalizedCheckpt())
|
||||
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetPrevFinalizedCheckpt(cp)
|
||||
require.Equal(t, cp, s.PrevFinalizedCheckpt())
|
||||
}
|
||||
28
beacon-chain/blockchain/store/type.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// Store is defined in the fork choice consensus spec for tracking current time and various versions of checkpoints.
|
||||
//
|
||||
// Spec code:
|
||||
// class Store(object):
|
||||
// time: uint64
|
||||
// genesis_time: uint64
|
||||
// justified_checkpoint: Checkpoint
|
||||
// finalized_checkpoint: Checkpoint
|
||||
// best_justified_checkpoint: Checkpoint
|
||||
// proposer_boost_root: Root
|
||||
type Store struct {
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
sync.RWMutex
|
||||
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
|
||||
// TODO(10094): Consider removing in v3.
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
}
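A minimal usage sketch (advanceFinalizedSketch is hypothetical, not part of this package) showing how a caller such as the blockchain service might rotate checkpoints with the getters and setters defined in setter_getter.go:

func advanceFinalizedSketch(s *Store, cp *ethpb.Checkpoint) {
	// Keep the previous finalized checkpoint for the gRPC API responses mentioned above, then record the new one.
	s.SetPrevFinalizedCheckpt(s.FinalizedCheckpt())
	s.SetFinalizedCheckpt(cp)
}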
|
||||
@@ -8,7 +8,6 @@ go_library(
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//testing:__subpackages__",
|
||||
"//testing/fuzz:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
@@ -19,9 +18,9 @@ go_library(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -18,9 +18,9 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -28,36 +28,44 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var ErrNilState = errors.New("nil state")
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
State state.BeaconState
|
||||
Root []byte
|
||||
Block block.SignedBeaconBlock
|
||||
Optimistic bool
|
||||
ValidAttestation bool
|
||||
ValidatorsRoot [32]byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
BlocksReceived []block.SignedBeaconBlock
|
||||
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
ValidatorsRoot [32]byte
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
DB db.Database
|
||||
State state.BeaconState
|
||||
Block block.SignedBeaconBlock
|
||||
VerifyBlkDescendantErr error
|
||||
stateNotifier statefeed.Notifier
|
||||
BlocksReceived []block.SignedBeaconBlock
|
||||
SyncCommitteeIndices []types.CommitteeIndex
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
ValidAttestation bool
|
||||
ForkChoiceStore *protoarray.Store
|
||||
VerifyBlkDescendantErr error
|
||||
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
SyncCommitteeIndices []types.CommitteeIndex
|
||||
Root []byte
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
PublicKey [48]byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
func (s *ChainService) ForkChoicer() forkchoice.ForkChoicer {
|
||||
return s.ForkChoiceStore
|
||||
}
|
||||
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
@@ -158,7 +166,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {

// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
    if s.State == nil {
        s.State = &v1.BeaconState{}
        return ErrNilState
    }
    if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
        return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())

@@ -185,7 +193,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.

// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock, _ [][32]byte) error {
    if s.State == nil {
        s.State = &v1.BeaconState{}
        return ErrNilState
    }
    for _, block := range blks {
        if !bytes.Equal(s.Root, block.Block().ParentRoot()) {

@@ -214,7 +222,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe

// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
    if s.State == nil {
        s.State = &v1.BeaconState{}
        return ErrNilState
    }
    if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
        return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())

@@ -285,12 +293,12 @@ func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
}

// ReceiveAttestation mocks ReceiveAttestation method in chain service.
func (s *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
    return nil
}

// ReceiveAttestationNoPubsub mocks ReceiveAttestationNoPubsub method in chain service.
func (s *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
func (_ *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
    return nil
}

@@ -317,18 +325,13 @@ func (s *ChainService) HeadETH1Data() *ethpb.Eth1Data {
    return s.ETH1Data
}

// ProtoArrayStore mocks the same method in the chain service.
func (s *ChainService) ProtoArrayStore() *protoarray.Store {
    return s.ForkChoiceStore
}

// GenesisTime mocks the same method in the chain service.
func (s *ChainService) GenesisTime() time.Time {
    return s.Genesis
}

// GenesisValidatorRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorRoot() [32]byte {
// GenesisValidatorsRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorsRoot() [32]byte {
    return s.ValidatorsRoot
}

@@ -368,8 +371,8 @@ func (s *ChainService) HasInitSyncBlock(rt [32]byte) bool {
    return s.InitSyncBlockRoots[rt]
}

// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
func (s *ChainService) HeadGenesisValidatorRoot() [32]byte {
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
    return [32]byte{}
}

@@ -379,7 +382,7 @@ func (s *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error
}

// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (s *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
    if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
        return errors.New("LMD and FFG miss matched")
    }

@@ -395,7 +398,7 @@ func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) e
}

// ChainHeads mocks ChainHeads and always return nil.
func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
func (_ *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
    return [][32]byte{
        bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
        bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),

@@ -404,12 +407,12 @@ func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
}

// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always return 0 and true.
func (s *ChainService) HeadPublicKeyToValidatorIndex(_ context.Context, _ [48]byte) (types.ValidatorIndex, bool) {
func (_ *ChainService) HeadPublicKeyToValidatorIndex(_ [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
    return 0, true
}

// HeadValidatorIndexToPublicKey mocks HeadValidatorIndexToPublicKey and always return empty and nil.
func (s *ChainService) HeadValidatorIndexToPublicKey(_ context.Context, _ types.ValidatorIndex) ([48]byte, error) {
func (s *ChainService) HeadValidatorIndexToPublicKey(_ context.Context, _ types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
    return s.PublicKey, nil
}

@@ -437,3 +440,13 @@ func (s *ChainService) HeadSyncSelectionProofDomain(_ context.Context, _ types.S
func (s *ChainService) HeadSyncContributionProofDomain(_ context.Context, _ types.Slot) ([]byte, error) {
    return s.SyncContributionProofDomain, nil
}

// IsOptimistic mocks the same method in the chain service.
func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
    return s.Optimistic, nil
}

// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
    return s.Optimistic, nil
}
@@ -36,6 +36,7 @@ func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (
    // per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
    // and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
    if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
        log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
        return &WeakSubjectivityVerifier{
            enabled: false,
        }, nil

@@ -79,15 +80,17 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
    if !v.db.HasBlock(ctx, v.root) {
        return errors.Wrap(errWSBlockNotFound, fmt.Sprintf("missing root %#x", v.root))
    }
    filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
    endSlot := v.slot + params.BeaconConfig().SlotsPerEpoch
    filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(endSlot)
    // A node should have the weak subjectivity block corresponds to the correct epoch in the DB.
    log.Infof("Searching block roots for weak subjectivity root=%#x, between slots %d-%d", v.root, v.slot, endSlot)
    roots, err := v.db.BlockRoots(ctx, filter)
    if err != nil {
        return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
    }
    for _, root := range roots {
        if v.root == root {
            log.Info("Weak subjectivity check has passed")
            log.Info("Weak subjectivity check has passed!!")
            v.verified = true
            return nil
        }
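Taken together, these hunks make the verifier a gate on startup history: a nil or zero checkpoint yields a disabled verifier, and an enabled one requires the checkpoint root to be present in the DB within one epoch of slots starting at the checkpoint slot. A rough sketch of how a caller wires this up, following the names used in this diff and in the test below (error handling and the surrounding service are assumed, not shown here):

    wsVerifier, err := NewWeakSubjectivityVerifier(cfg.WeakSubjectivityCheckpt, beaconDB)
    if err != nil {
        return err
    }
    // With a nil/zero checkpoint the verifier above is built with enabled: false,
    // and the "nil root and epoch" test case below expects no error from the check.
    if err := wsVerifier.VerifyWeakSubjectivity(ctx, finalizedCheckpt.Epoch); err != nil {
        return errors.Wrap(err, "weak subjectivity verification failed")
    }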
@@ -6,71 +6,81 @@ import (

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/testing/util"
    "github.com/prysmaticlabs/prysm/time/slots"
)

func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
    beaconDB := testDB.SetupDB(t)

    b := util.NewBeaconBlock()
    b.Block.Slot = 32
    require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
    b.Block.Slot = 1792480
    wsb, err := wrapper.WrappedSignedBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
    r, err := b.Block.HashTreeRoot()
    require.NoError(t, err)

    blockEpoch := slots.ToEpoch(b.Block.Slot)
    tests := []struct {
        wsVerified bool
        disabled bool
        wantErr error
        checkpt *ethpb.Checkpoint
        finalizedEpoch types.Epoch
        name string
    }{
        {
            name: "nil root and epoch",
        },
        {
            name: "already verified",
            checkpt: &ethpb.Checkpoint{Epoch: 2},
            finalizedEpoch: 2,
            wsVerified: true,
            name: "nil root and epoch",
            disabled: true,
        },
        {
            name: "not yet to verify, ws epoch higher than finalized epoch",
            checkpt: &ethpb.Checkpoint{Epoch: 2},
            finalizedEpoch: 1,
            checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: blockEpoch},
            finalizedEpoch: blockEpoch - 1,
        },
        {
            name: "can't find the block in DB",
            checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: 1},
            finalizedEpoch: 3,
            checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), Epoch: 1},
            finalizedEpoch: blockEpoch + 1,
            wantErr: errWSBlockNotFound,
        },
        {
            name: "can't find the block corresponds to ws epoch in DB",
            checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
            finalizedEpoch: 3,
            checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch - 2}, // Root belongs in epoch 1.
            finalizedEpoch: blockEpoch - 1,
            wantErr: errWSBlockNotFoundInEpoch,
        },
        {
            name: "can verify and pass",
            checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
            finalizedEpoch: 3,
            checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
            finalizedEpoch: blockEpoch + 1,
        },
        {
            name: "equal epoch",
            checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
            finalizedEpoch: blockEpoch,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
            require.Equal(t, !tt.disabled, wv.enabled)
            require.NoError(t, err)
            s := &Service{
                cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
                finalizedCheckpt: &ethpb.Checkpoint{Epoch: tt.finalizedEpoch},
                wsVerifier: wv,
                cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
                store: &store.Store{},
                wsVerifier: wv,
            }
            err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.finalizedCheckpt.Epoch)
            s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch})
            err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
            if tt.wantErr == nil {
                require.NoError(t, err)
            } else {
beacon-chain/cache/BUILD.bazel | 30 (vendored)

@@ -1,44 +1,35 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

# gazelle:exclude committee_disabled.go
# gazelle:exclude proposer_indices_disabled.go
go_library(
    name = "go_default_library",
    srcs = [
        "active_balance.go",
        "active_balance_disabled.go", # keep
        "attestation_data.go",
        "checkpoint_state.go",
        "committee.go",
        "committee_disabled.go", # keep
        "committees.go",
        "common.go",
        "doc.go",
        "error.go",
        "proposer_indices.go",
        "proposer_indices_disabled.go", # keep
        "proposer_indices_type.go",
        "skip_slot_cache.go",
        "subnet_ids.go",
        "sync_committee.go",
        "sync_committee_disabled.go", # keep
        "sync_committee_head_state.go",
        "sync_subnet_ids.go",
    ] + select({
        "//testing/fuzz:fuzzing_enabled": [
            "active_balance_disabled.go",
            "committee_disabled.go",
            "proposer_indices_disabled.go",
            "sync_committee_disabled.go",
        ],
        "//conditions:default": [
            "active_balance.go",
            "committee.go",
            "proposer_indices.go",
            "sync_committee.go",
        ],
    }),
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing/fuzz:__pkg__",
        "//tools:__subpackages__",
    ],
    deps = [
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
        "//cache/lru:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",

@@ -48,6 +39,7 @@ go_library(
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_patrickmn_go_cache//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",

@@ -81,7 +73,9 @@ go_test(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v1:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
        "//beacon-chain/state/v3:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
beacon-chain/cache/active_balance.go | 11 (vendored)

@@ -1,4 +1,5 @@
// +build !libfuzzer
//go:build !fuzz
// +build !fuzz

package cache

@@ -17,10 +18,12 @@ import (
    "github.com/prysmaticlabs/prysm/config/params"
)

var (
const (
    // maxBalanceCacheSize defines the max number of active balances can cache.
    maxBalanceCacheSize = uint64(4)
    maxBalanceCacheSize = int(4)
)

var (
    // BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
    balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "total_effective_balance_cache_miss",

@@ -42,7 +45,7 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
    return &BalanceCache{
        cache: lruwrpr.New(int(maxBalanceCacheSize)),
        cache: lruwrpr.New(maxBalanceCacheSize),
    }
}

@@ -1,4 +1,5 @@
// +build libfuzzer
//go:build fuzz
// +build fuzz

package cache
beacon-chain/cache/committee.go | 11 (vendored)

@@ -1,4 +1,5 @@
// +build !libfuzzer
//go:build !fuzz
// +build !fuzz

package cache

@@ -19,11 +20,13 @@ import (
    mathutil "github.com/prysmaticlabs/prysm/math"
)

var (
const (
    // maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
    // Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
    maxCommitteesCacheSize = uint64(32)
    maxCommitteesCacheSize = int(32)
)

var (
    // CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
    CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "committee_cache_miss",

@@ -55,7 +58,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
    return &CommitteeCache{
        CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
        CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
        inProgress: make(map[string]bool),
    }
}

beacon-chain/cache/committee_disabled.go | 3 (vendored)

@@ -1,4 +1,5 @@
// +build libfuzzer
//go:build fuzz
// +build fuzz

// This file is used in fuzzer builds to bypass global committee caches.
package cache

beacon-chain/cache/committee_fuzz_test.go | 4 (vendored)

@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
        require.NoError(t, err)
    }

    assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
    assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
}

func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {

@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
        assert.DeepEqual(t, c.SortedIndices, indices)
    }

    assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
    assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
}
beacon-chain/cache/committee_test.go | 2 (vendored)

@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
    }

    k := cache.CommitteeCache.Keys()
    assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
    assert.Equal(t, maxCommitteesCacheSize, len(k))

    sort.Slice(k, func(i, j int) bool {
        return k[i].(string) < k[j].(string)

beacon-chain/cache/depositcache/BUILD.bazel | 6 (vendored)

@@ -8,8 +8,12 @@ go_library(
        "pending_deposits.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
    visibility = ["//beacon-chain:__subpackages__"],
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing/spectest:__subpackages__",
    ],
    deps = [
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/hash:go_default_library",
@@ -14,10 +14,10 @@ import (
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/container/trie"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"

@@ -50,10 +50,10 @@ type FinalizedDeposits struct {
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
    // Beacon chain deposits in memory.
    pendingDeposits []*dbpb.DepositContainer
    deposits []*dbpb.DepositContainer
    pendingDeposits []*ethpb.DepositContainer
    deposits []*ethpb.DepositContainer
    finalizedDeposits *FinalizedDeposits
    depositsByKey map[[48]byte][]*dbpb.DepositContainer
    depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
    depositsLock sync.RWMutex
}

@@ -67,9 +67,9 @@ func New() (*DepositCache, error) {
    // finalizedDeposits.MerkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
    // Inserting the first item into the trie will set the value of the index to 0.
    return &DepositCache{
        pendingDeposits: []*dbpb.DepositContainer{},
        deposits: []*dbpb.DepositContainer{},
        depositsByKey: map[[48]byte][]*ethpb.DepositContainer{},
        pendingDeposits: []*ethpb.DepositContainer{},
        deposits: []*ethpb.DepositContainer{},
        depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
        finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
    }, nil
}

@@ -77,7 +77,7 @@ func New() (*DepositCache, error) {
// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
    defer span.End()
    if d == nil {
        log.WithFields(logrus.Fields{

@@ -96,9 +96,9 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
    }
    // Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
    heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
    depCtr := &dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
    depCtr := &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
    newDeposits := append(
        []*dbpb.DepositContainer{depCtr},
        []*ethpb.DepositContainer{depCtr},
        dc.deposits[heightIdx:]...)
    dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
    // Append the deposit to our map, in the event no deposits
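The comment in the hunk above is the key design choice: InsertDeposit pays for ordering once, with a binary search at write time, so every read path can assume dc.deposits is already sorted by Index. The same pattern in a tiny self-contained form (plain ints instead of deposit containers, purely illustrative):

    package main

    import (
        "fmt"
        "sort"
    )

    // insertSorted keeps s sorted ascending while inserting v, mirroring the
    // sort.Search + append splice used by InsertDeposit above.
    func insertSorted(s []int, v int) []int {
        i := sort.Search(len(s), func(j int) bool { return s[j] >= v })
        s = append(s, 0)     // grow by one element
        copy(s[i+1:], s[i:]) // shift the tail right
        s[i] = v             // place the new value at its sorted position
        return s
    }

    func main() {
        fmt.Println(insertSorted([]int{1, 3, 7}, 5)) // [1 3 5 7]
    }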
@@ -110,8 +110,8 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
}

// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbpb.DepositContainer) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
    defer span.End()
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

@@ -130,7 +130,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbp

// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
    defer span.End()
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

@@ -163,8 +163,8 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
}

// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.DepositContainer {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
    _, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

@@ -175,11 +175,15 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.Deposi
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
    _, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    return dc.allDeposits(untilBlk)
}

func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
    var deposits []*ethpb.Deposit
    for _, ctnr := range dc.deposits {
        if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {

@@ -192,7 +196,7 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*e
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
    _, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

@@ -208,7 +212,7 @@ func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, block
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
    _, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

@@ -229,7 +233,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et

// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposits {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
    _, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

@@ -249,7 +253,7 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
    defer dc.depositsLock.RUnlock()

    if dc.finalizedDeposits == nil {
        return dc.AllDeposits(ctx, untilBlk)
        return dc.allDeposits(untilBlk)
    }

    lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
@@ -10,7 +10,6 @@ import (
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/container/trie"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/testing/assert"
    "github.com/prysmaticlabs/prysm/testing/require"

@@ -44,31 +43,31 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'A'}}},
            deposit: &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: []byte{'A'}}},
            index: 0,
            expectedErr: "",
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'B'}}},
            deposit: &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: []byte{'B'}}},
            index: 3,
            expectedErr: "wanted deposit with index 1 to be inserted but received 3",
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'C'}}},
            deposit: &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: []byte{'C'}}},
            index: 1,
            expectedErr: "",
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'D'}}},
            deposit: &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: []byte{'D'}}},
            index: 4,
            expectedErr: "wanted deposit with index 2 to be inserted but received 4",
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'E'}}},
            deposit: &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: []byte{'E'}}},
            index: 2,
            expectedErr: "",
        },

@@ -93,7 +92,7 @@ func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []*dbpb.DepositContainer{
    deposits := []*ethpb.DepositContainer{
        {
            Eth1BlockHeight: 10,
            Deposit: &ethpb.Deposit{},

@@ -133,7 +132,7 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []*dbpb.DepositContainer{
    deposits := []*ethpb.DepositContainer{
        {
            Eth1BlockHeight: 10,
            Deposit: &ethpb.Deposit{},

@@ -174,7 +173,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
    t.Run("requesting_last_item_works", func(t *testing.T) {
        dc, err := New()
        require.NoError(t, err)
        dc.deposits = []*dbpb.DepositContainer{
        dc.deposits = []*ethpb.DepositContainer{
            {
                Eth1BlockHeight: 10,
                Index: 0,

@@ -205,7 +204,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
        dc, err := New()
        require.NoError(t, err)

        dc.deposits = []*dbpb.DepositContainer{
        dc.deposits = []*ethpb.DepositContainer{
            {
                Eth1BlockHeight: 10,
                Index: 0,

@@ -221,7 +220,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
        dc, err := New()
        require.NoError(t, err)

        dc.deposits = []*dbpb.DepositContainer{
        dc.deposits = []*ethpb.DepositContainer{
            {
                Eth1BlockHeight: 8,
                Index: 0,

@@ -247,7 +246,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
        dc, err := New()
        require.NoError(t, err)

        dc.deposits = []*dbpb.DepositContainer{
        dc.deposits = []*ethpb.DepositContainer{
            {
                Eth1BlockHeight: 8,
                Index: 0,

@@ -263,7 +262,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
        dc, err := New()
        require.NoError(t, err)

        dc.deposits = []*dbpb.DepositContainer{
        dc.deposits = []*ethpb.DepositContainer{
            {
                Eth1BlockHeight: 8,
                Index: 0,

@@ -279,7 +278,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
        dc, err := New()
        require.NoError(t, err)

        dc.deposits = []*dbpb.DepositContainer{
        dc.deposits = []*ethpb.DepositContainer{
            {
                Eth1BlockHeight: 8,
                Index: 0,

@@ -316,7 +315,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)
    ctrs := []*dbpb.DepositContainer{
    ctrs := []*ethpb.DepositContainer{
        {
            Eth1BlockHeight: 9,
            Deposit: &ethpb.Deposit{

@@ -374,7 +373,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    finalizedDeposits := []*dbpb.DepositContainer{
    finalizedDeposits := []*ethpb.DepositContainer{
        {
            Deposit: &ethpb.Deposit{
                Data: &ethpb.Deposit_Data{

@@ -406,7 +405,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
            Index: 2,
        },
    }
    dc.deposits = append(finalizedDeposits, &dbpb.DepositContainer{
    dc.deposits = append(finalizedDeposits, &ethpb.DepositContainer{
        Deposit: &ethpb.Deposit{
            Data: &ethpb.Deposit_Data{
                PublicKey: bytesutil.PadTo([]byte{3}, 48),

@@ -438,7 +437,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    oldFinalizedDeposits := []*dbpb.DepositContainer{
    oldFinalizedDeposits := []*ethpb.DepositContainer{
        {
            Deposit: &ethpb.Deposit{
                Data: &ethpb.Deposit_Data{

@@ -460,7 +459,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
            Index: 1,
        },
    }
    newFinalizedDeposit := dbpb.DepositContainer{
    newFinalizedDeposit := ethpb.DepositContainer{
        Deposit: &ethpb.Deposit{
            Data: &ethpb.Deposit_Data{
                PublicKey: bytesutil.PadTo([]byte{2}, 48),

@@ -473,7 +472,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
    dc.deposits = oldFinalizedDeposits
    dc.InsertFinalizedDeposits(context.Background(), 1)
    // Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
    dc.deposits = []*dbpb.DepositContainer{&newFinalizedDeposit}
    dc.deposits = []*ethpb.DepositContainer{&newFinalizedDeposit}

    dc.InsertFinalizedDeposits(context.Background(), 2)

@@ -506,7 +505,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    finalizedDeposits := []*dbpb.DepositContainer{
    finalizedDeposits := []*ethpb.DepositContainer{
        {
            Eth1BlockHeight: 10,
            Deposit: &ethpb.Deposit{

@@ -531,7 +530,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
        },
    }
    dc.deposits = append(finalizedDeposits,
        &dbpb.DepositContainer{
        &ethpb.DepositContainer{
            Eth1BlockHeight: 10,
            Deposit: &ethpb.Deposit{
                Data: &ethpb.Deposit_Data{

@@ -542,7 +541,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
            },
            Index: 2,
        },
        &dbpb.DepositContainer{
        &ethpb.DepositContainer{
            Eth1BlockHeight: 11,
            Deposit: &ethpb.Deposit{
                Data: &ethpb.Deposit_Data{

@@ -563,7 +562,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
    dc, err := New()
    require.NoError(t, err)

    finalizedDeposits := []*dbpb.DepositContainer{
    finalizedDeposits := []*ethpb.DepositContainer{
        {
            Eth1BlockHeight: 10,
            Deposit: &ethpb.Deposit{

@@ -588,7 +587,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
        },
    }
    dc.deposits = append(finalizedDeposits,
        &dbpb.DepositContainer{
        &ethpb.DepositContainer{
            Eth1BlockHeight: 10,
            Deposit: &ethpb.Deposit{
                Data: &ethpb.Deposit_Data{

@@ -599,7 +598,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
            },
            Index: 2,
        },
        &dbpb.DepositContainer{
        &ethpb.DepositContainer{
            Eth1BlockHeight: 11,
            Deposit: &ethpb.Deposit{
                Data: &ethpb.Deposit_Data{
@@ -8,7 +8,6 @@ import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prysmaticlabs/prysm/crypto/hash"
    dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"

@@ -24,13 +23,13 @@ var (
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
    PendingContainers(ctx context.Context, untilBlk *big.Int) []*dbpb.DepositContainer
    PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}

// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
    defer span.End()
    if d == nil {
        log.WithFields(logrus.Fields{

@@ -42,7 +41,7 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()
    dc.pendingDeposits = append(dc.pendingDeposits,
        &dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
        &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
    pendingDepositsCount.Inc()
    span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}

@@ -66,13 +65,13 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)

// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*dbpb.DepositContainer {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
    _, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    var depositCntrs []*dbpb.DepositContainer
    var depositCntrs []*ethpb.DepositContainer
    for _, ctnr := range dc.pendingDeposits {
        if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
            depositCntrs = append(depositCntrs, ctnr)

@@ -91,7 +90,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
// RemovePendingDeposit from the database. The deposit is indexed by the
// Index. This method does nothing if deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
    _, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
    defer span.End()

    if d == nil {

@@ -129,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
    _, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
    defer span.End()

    if merkleTreeIndex == 0 {

@@ -140,7 +139,7 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

    var cleanDeposits []*dbpb.DepositContainer
    var cleanDeposits []*ethpb.DepositContainer
    for _, dp := range dc.pendingDeposits {
        if dp.Index >= merkleTreeIndex {
            cleanDeposits = append(cleanDeposits, dp)
@@ -6,7 +6,6 @@ import (
    "testing"

    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/testing/assert"
    "google.golang.org/protobuf/proto"

@@ -42,7 +41,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
    }
    depToRemove := &ethpb.Deposit{Proof: proof1, Data: data}
    otherDep := &ethpb.Deposit{Proof: proof2, Data: data}
    db.pendingDeposits = []*dbpb.DepositContainer{
    db.pendingDeposits = []*ethpb.DepositContainer{
        {Deposit: depToRemove, Index: 1},
        {Deposit: otherDep, Index: 5},
    }

@@ -55,7 +54,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {

func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
    dc := DepositCache{}
    dc.pendingDeposits = []*dbpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
    dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
    dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
    assert.Equal(t, 1, len(dc.pendingDeposits), "Deposit unexpectedly removed")
}

@@ -79,7 +78,7 @@ func TestPendingDeposit_RoundTrip(t *testing.T) {
func TestPendingDeposits_OK(t *testing.T) {
    dc := DepositCache{}

    dc.pendingDeposits = []*dbpb.DepositContainer{
    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
        {Eth1BlockHeight: 4, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}},
        {Eth1BlockHeight: 6, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},

@@ -99,7 +98,7 @@ func TestPendingDeposits_OK(t *testing.T) {
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
    dc := DepositCache{}

    dc.pendingDeposits = []*dbpb.DepositContainer{
    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},

@@ -109,7 +108,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
    }

    dc.PrunePendingDeposits(context.Background(), 0)
    expected := []*dbpb.DepositContainer{
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},

@@ -123,7 +122,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
func TestPrunePendingDeposits_OK(t *testing.T) {
    dc := DepositCache{}

    dc.pendingDeposits = []*dbpb.DepositContainer{
    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},

@@ -133,7 +132,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
    }

    dc.PrunePendingDeposits(context.Background(), 6)
    expected := []*dbpb.DepositContainer{
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},

@@ -142,7 +141,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {

    assert.DeepEqual(t, expected, dc.pendingDeposits)

    dc.pendingDeposits = []*dbpb.DepositContainer{
    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},

@@ -152,7 +151,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
    }

    dc.PrunePendingDeposits(context.Background(), 10)
    expected = []*dbpb.DepositContainer{
    expected = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }
beacon-chain/cache/proposer_indices.go | 3 (vendored)

@@ -1,4 +1,5 @@
// +build !libfuzzer
//go:build !fuzz
// +build !fuzz

package cache

@@ -1,4 +1,5 @@
// +build libfuzzer
//go:build fuzz
// +build fuzz

// This file is used in fuzzer builds to bypass proposer indices caches.
package cache

beacon-chain/cache/subnet_ids.go | 2 (vendored)

@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
    // Given a node can calculate committee assignments of current epoch and next epoch.
    // Max size is set to 2 epoch length.
    cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
    cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
    attesterCache := lruwrpr.New(cacheSize)
    aggregatorCache := lruwrpr.New(cacheSize)
    epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
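With mainnet parameters (32 slots per epoch and at most 64 committees per slot), that expression works out to 32 * 64 * 2 = 4096 entries, i.e. room for the committee assignments of the current and the next epoch.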
beacon-chain/cache/subnet_ids_test.go | 5 (vendored)

@@ -4,6 +4,7 @@ import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/testing/assert"
    "github.com/prysmaticlabs/prysm/testing/require"
)

@@ -46,12 +47,12 @@ func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
    c := newSubnetIDs()

    for i := 0; i < 20; i++ {
        pubkey := [48]byte{byte(i)}
        pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
        c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
    }

    for i := uint64(0); i < 20; i++ {
        pubkey := [48]byte{byte(i)}
        pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}

        idxs, ok, _ := c.GetPersistentSubnets(pubkey[:])
        if !ok {

beacon-chain/cache/sync_committee.go | 3 (vendored)

@@ -1,4 +1,5 @@
// +build !libfuzzer
//go:build !fuzz
// +build !fuzz

package cache

@@ -1,4 +1,5 @@
// +build libfuzzer
//go:build fuzz
// +build fuzz

package cache
beacon-chain/cache/sync_committee_head_state.go | 14 (vendored)

@@ -6,8 +6,8 @@ import (
    lru "github.com/hashicorp/golang-lru"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
    "github.com/prysmaticlabs/prysm/runtime/version"
)

// SyncCommitteeHeadStateCache for the latest head state requested by a sync committee participant.

@@ -31,10 +31,11 @@ func (c *SyncCommitteeHeadStateCache) Put(slot types.Slot, st state.BeaconState)
    if st == nil || st.IsNil() {
        return ErrNilValueProvided
    }
    _, ok := st.(*stateAltair.BeaconState)
    if !ok {
    if st.Version() == version.Phase0 {
        return ErrIncorrectType
    }
    c.cache.Add(slot, st)
    return nil
}

@@ -47,9 +48,14 @@ func (c *SyncCommitteeHeadStateCache) Get(slot types.Slot) (state.BeaconState, e
    if !exists {
        return nil, ErrNotFound
    }
    st, ok := val.(*stateAltair.BeaconState)
    st, ok := val.(state.BeaconState)
    if !ok {
        return nil, ErrIncorrectType
    }
    switch st.Version() {
    case version.Altair, version.Bellatrix:
    default:
        return nil, ErrIncorrectType
    }
    return st, nil
}
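The interesting part of this change is the asymmetry it introduces: Put only rejects Phase0 states, while Get explicitly whitelists Altair and Bellatrix before returning anything. The read-side check on its own, as a sketch (state and version are the packages already imported above):

    // allowedOnRead mirrors the switch in Get: only Altair and Bellatrix
    // states come back out of the sync committee head state cache.
    func allowedOnRead(st state.BeaconState) bool {
        switch st.Version() {
        case version.Altair, version.Bellatrix:
            return true
        }
        return false
    }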
@@ -7,6 +7,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
    v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
    "github.com/prysmaticlabs/prysm/config/params"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/testing/require"

@@ -27,6 +28,13 @@ func TestSyncCommitteeHeadState(t *testing.T) {
        },
    })
    require.NoError(t, err)
    bellatrixState, err := v3.InitializeFromProto(&ethpb.BeaconStateBellatrix{
        Fork: &ethpb.Fork{
            PreviousVersion: params.BeaconConfig().GenesisForkVersion,
            CurrentVersion: params.BeaconConfig().GenesisForkVersion,
        },
    })
    require.NoError(t, err)
    type put struct {
        slot types.Slot
        state state.BeaconState

@@ -82,6 +90,24 @@ func TestSyncCommitteeHeadState(t *testing.T) {
            },
            want: beaconState,
        },
        {
            name: "not found when non-existent key in non-empty cache (bellatrix state)",
            key: types.Slot(2),
            put: &put{
                slot: types.Slot(1),
                state: bellatrixState,
            },
            wantErr: true,
        },
        {
            name: "found with key (bellatrix state)",
            key: types.Slot(100),
            put: &put{
                slot: types.Slot(100),
                state: bellatrixState,
            },
            want: bellatrixState,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
beacon-chain/cache/sync_committee_test.go | 50 (vendored)

@@ -5,8 +5,6 @@ import (

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/testing/util"

@@ -28,10 +26,10 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
    }{
        {
            name: "only current epoch",
            currentSyncCommittee: convertToCommittee([][]byte{
            currentSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
            }),
            nextSyncCommittee: convertToCommittee([][]byte{}),
            nextSyncCommittee: util.ConvertToCommittee([][]byte{}),
            currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
                1: {0},
                2: {1, 3, 4},

@@ -45,8 +43,8 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
        },
        {
            name: "only next epoch",
            currentSyncCommittee: convertToCommittee([][]byte{}),
            nextSyncCommittee: convertToCommittee([][]byte{
            currentSyncCommittee: util.ConvertToCommittee([][]byte{}),
            nextSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
            }),
            currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{

@@ -62,14 +60,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
        },
        {
            name: "some current epoch and some next epoch",
            currentSyncCommittee: convertToCommittee([][]byte{
            currentSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[1],
                pubKeys[2],
                pubKeys[3],
                pubKeys[2],
                pubKeys[2],
            }),
            nextSyncCommittee: convertToCommittee([][]byte{
            nextSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[7],
                pubKeys[6],
                pubKeys[5],

@@ -90,14 +88,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
        },
        {
            name: "some current epoch and some next epoch duplicated across",
            currentSyncCommittee: convertToCommittee([][]byte{
            currentSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[1],
                pubKeys[2],
                pubKeys[3],
                pubKeys[2],
                pubKeys[2],
            }),
            nextSyncCommittee: convertToCommittee([][]byte{
            nextSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[2],
                pubKeys[1],
                pubKeys[3],

@@ -117,13 +115,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
        },
        {
            name: "all duplicated",
            currentSyncCommittee: convertToCommittee([][]byte{
            currentSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[100],
                pubKeys[100],
                pubKeys[100],
                pubKeys[100],
            }),
            nextSyncCommittee: convertToCommittee([][]byte{
            nextSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[100],
                pubKeys[100],
                pubKeys[100],

@@ -138,13 +136,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
        },
        {
            name: "unknown keys",
            currentSyncCommittee: convertToCommittee([][]byte{
            currentSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[100],
                pubKeys[100],
                pubKeys[100],
                pubKeys[100],
            }),
            nextSyncCommittee: convertToCommittee([][]byte{
            nextSyncCommittee: util.ConvertToCommittee([][]byte{
                pubKeys[100],
                pubKeys[100],
                pubKeys[100],

@@ -189,13 +187,13 @@ func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
    c := cache.NewSyncCommittee()
    s, _ := util.DeterministicGenesisStateAltair(t, 64)
    require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
    require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{1}})))
    require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
    require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
    require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{2}})))
    require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
    require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
    require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{3}})))
    require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
    require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
    require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{4}})))
    require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))

    _, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)

@@ -204,19 +202,3 @@ func TestSyncCommitteeCache_CanRotate(t *testing.T) {
    _, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
    require.NoError(t, err)
}

func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
    var pubKeys [][]byte
    for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
        if i < uint64(len(inputKeys)) {
            pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
        } else {
            pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
        }
    }

    return &ethpb.SyncCommittee{
        Pubkeys: pubKeys,
        AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
    }
}
9
beacon-chain/cache/sync_subnet_ids_test.go
vendored
9
beacon-chain/cache/sync_subnet_ids_test.go
vendored
@@ -3,6 +3,7 @@ package cache
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -12,12 +13,12 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
 	c := newSyncSubnetIDs()

 	for i := 0; i < 20; i++ {
-		pubkey := [48]byte{byte(i)}
+		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
 		c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
 	}

 	for i := uint64(0); i < 20; i++ {
-		pubkey := [48]byte{byte(i)}
+		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}

 		idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
 		if !ok {
@@ -34,7 +35,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
 	c := newSyncSubnetIDs()

 	for i := 0; i < 20; i++ {
-		pubkey := [48]byte{byte(i)}
+		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
 		c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
 	}

@@ -42,7 +43,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
 	assert.Equal(t, 0, len(coms))

 	for i := uint64(0); i < 20; i++ {
-		pubkey := [48]byte{byte(i)}
+		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}

 		_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
 		if !ok {
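
The change repeated through the three hunks above swaps the hard-coded array length 48 for the named constant fieldparams.BLSPubkeyLength, imported in the hunk further up. A small, self-contained sketch of the pattern, using a hypothetical helper name, is:

package cache

import fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"

// examplePubkey is a hypothetical helper illustrating the pattern used in the
// tests above: the array length is tied to the config constant (48 bytes for a
// BLS public key) instead of repeating the literal at every call site.
func examplePubkey(i int) [fieldparams.BLSPubkeyLength]byte {
	var pk [fieldparams.BLSPubkeyLength]byte
	pk[0] = byte(i)
	return pk
}
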
@@ -30,6 +30,7 @@ go_library(
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
@@ -38,6 +39,7 @@ go_library(
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//proto/prysm/v1alpha1/block:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
@@ -70,6 +72,8 @@ go_test(
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
        "//beacon-chain/state/v3:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
@@ -104,34 +104,30 @@ func SetParticipationAndRewardProposer(
 	targetEpoch types.Epoch,
 	indices []uint64,
 	participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
-	var epochParticipation []byte
 	var proposerRewardNumerator uint64
 	currentEpoch := time.CurrentEpoch(beaconState)
-	var err error
+	var stateErr error
 	if targetEpoch == currentEpoch {
-		epochParticipation, err = beaconState.CurrentEpochParticipation()
-		if err != nil {
-			return nil, err
-		}
+		stateErr = beaconState.ModifyCurrentParticipationBits(func(val []byte) ([]byte, error) {
+			propRewardNum, epochParticipation, err := EpochParticipation(beaconState, indices, val, participatedFlags, totalBalance)
+			if err != nil {
+				return nil, err
+			}
+			proposerRewardNumerator = propRewardNum
+			return epochParticipation, nil
+		})
 	} else {
-		epochParticipation, err = beaconState.PreviousEpochParticipation()
-		if err != nil {
-			return nil, err
-		}
+		stateErr = beaconState.ModifyPreviousParticipationBits(func(val []byte) ([]byte, error) {
+			propRewardNum, epochParticipation, err := EpochParticipation(beaconState, indices, val, participatedFlags, totalBalance)
+			if err != nil {
+				return nil, err
+			}
+			proposerRewardNumerator = propRewardNum
+			return epochParticipation, nil
+		})
 	}
-
-	proposerRewardNumerator, epochParticipation, err := EpochParticipation(beaconState, indices, epochParticipation, participatedFlags, totalBalance)
-	if err != nil {
-		return nil, err
-	}
-
-	if targetEpoch == currentEpoch {
-		if err := beaconState.SetCurrentParticipationBits(epochParticipation); err != nil {
-			return nil, err
-		}
-	} else {
-		if err := beaconState.SetPreviousParticipationBits(epochParticipation); err != nil {
-			return nil, err
-		}
+	if stateErr != nil {
+		return nil, stateErr
 	}

 	if err := RewardProposer(ctx, beaconState, proposerRewardNumerator); err != nil {
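
The hunk above replaces a read-modify-write sequence (CurrentEpochParticipation / SetCurrentParticipationBits and their previous-epoch counterparts) with a single ModifyCurrentParticipationBits or ModifyPreviousParticipationBits call that takes a mutation callback, so the state applies the updated slice itself. Condensed to its caller-side essentials, and using only the names visible in the diff (the surrounding plumbing is illustrative), the shape is:

// Sketch of the callback-based update shown above: the state applies the
// returned slice, so the caller no longer copies the participation bits out
// and writes them back explicitly.
var proposerRewardNumerator uint64
stateErr := beaconState.ModifyCurrentParticipationBits(func(val []byte) ([]byte, error) {
	// val holds the current participation flags; return the updated slice.
	propRewardNum, updated, err := EpochParticipation(beaconState, indices, val, participatedFlags, totalBalance)
	if err != nil {
		return nil, err
	}
	proposerRewardNumerator = propRewardNum
	return updated, nil
})
if stateErr != nil {
	return nil, stateErr
}
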
@@ -14,6 +14,7 @@ import (
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state"
 	stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
+	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/config/params"
 	"github.com/prysmaticlabs/prysm/crypto/bls"
 	"github.com/prysmaticlabs/prysm/math"
@@ -28,7 +29,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
 	attestations := []*ethpb.Attestation{
 		util.HydrateAttestation(&ethpb.Attestation{
 			Data: &ethpb.AttestationData{
-				Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
+				Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
 				Slot:   5,
 			},
 		}),
@@ -47,7 +48,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
 		params.BeaconConfig().MinAttestationInclusionDelay,
 		beaconState.Slot(),
 	)
-	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, want, err)
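
A second change repeated throughout this file replaces the fork-specific wrapper.WrappedAltairSignedBeaconBlock constructor with wrapper.WrappedSignedBeaconBlock. Judging from the call sites in this diff, the new constructor accepts the same concrete signed-block value and returns the same wrapped block the Altair-specific one did; a minimal before/after sketch (exact signatures are not shown in this diff):

// Before: the test had to pick the fork-specific constructor.
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)

// After: one constructor covers the supported block versions (behaviour
// inferred from the call sites above).
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
require.NoError(t, err)
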
@@ -78,7 +79,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
 		time.PrevEpoch(beaconState),
 		time.CurrentEpoch(beaconState),
 	)
-	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, want, err)
@@ -88,8 +89,8 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
 	attestations := []*ethpb.Attestation{
 		{
 			Data: &ethpb.AttestationData{
-				Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
-				Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
+				Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
+				Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, fieldparams.RootLength)},
 			},
 			AggregationBits: bitfield.Bitlist{0x09},
 		},
@@ -107,13 +108,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))

 	want := "source check point not equal to current justified checkpoint"
-	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, want, err)
 	b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
 	b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
-	wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err = wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, want, err)
@@ -127,8 +128,8 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
 	attestations := []*ethpb.Attestation{
 		{
 			Data: &ethpb.AttestationData{
-				Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
-				Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
+				Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, fieldparams.RootLength)},
+				Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, fieldparams.RootLength)},
 				Slot:   params.BeaconConfig().SlotsPerEpoch,
 			},
 			AggregationBits: aggBits,
@@ -148,14 +149,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
 	require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))

 	want := "source check point not equal to previous justified checkpoint"
-	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, want, err)
 	b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
 	b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
 	b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
-	wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err = wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, want, err)
@@ -187,7 +188,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))

 	expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
-	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.ErrorContains(t, expected, err)
@@ -231,7 +232,7 @@ func TestProcessAttestations_OK(t *testing.T) {

 	err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
 	require.NoError(t, err)
-	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
+	wsb, err := wrapper.WrappedSignedBeaconBlock(block)
 	require.NoError(t, err)
 	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
 	require.NoError(t, err)
@@ -250,8 +251,8 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
 	att := &ethpb.Attestation{
 		Data: &ethpb.AttestationData{
 			BeaconBlockRoot: r,
-			Source:          &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
-			Target:          &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
+			Source:          &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
+			Target:          &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
 		},
 		AggregationBits: aggBits,
 	}
@@ -259,7 +260,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
 	att.Signature = zeroSig[:]

 	ckp := beaconState.CurrentJustifiedCheckpoint()
-	copy(ckp.Root, make([]byte, 32))
+	copy(ckp.Root, make([]byte, fieldparams.RootLength))
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))

 	b, err := helpers.TotalActiveBalance(beaconState)
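
The same constants-over-literals change applies to 32-byte roots in this file: make([]byte, 32) becomes make([]byte, fieldparams.RootLength). As a compact sketch, a hypothetical helper for the zeroed checkpoints these tests construct could look like this (the helper name is illustrative; types.Epoch and ethpb.Checkpoint are the types already used above):

// zeroCheckpoint is a hypothetical helper mirroring the literals replaced
// above: a checkpoint whose root is an all-zero, RootLength-sized slice.
func zeroCheckpoint(epoch types.Epoch) *ethpb.Checkpoint {
	return &ethpb.Checkpoint{Epoch: epoch, Root: make([]byte, fieldparams.RootLength)}
}
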
@@ -420,7 +421,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
 		}
 		s, err := stateAltair.InitializeFromProtoUnsafe(state)
 		require.NoError(t, err)
-		wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+		wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 		require.NoError(t, err)
 		r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb)
 		if err != nil && r != nil {