Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: replaceWit... → eip4844-ch... (316 commits)
| SHA1 |
|---|
| df94859f1d |
| 80e9042c6b |
| 6b7e8ac00e |
| ca770f12ee |
| 74bebd9244 |
| 447b42044a |
| 6ec8d23d4f |
| a41d80a03d |
| 849c1dd25b |
| d430266b70 |
| 6d52516638 |
| 2ff3a82eac |
| d403b68d76 |
| b0768e39c3 |
| cae57ee9f6 |
| 3104fd217d |
| d1b9954566 |
| 033b165c08 |
| 78175ee0fd |
| a34cc7f7bf |
| 6f22cd7963 |
| 58967e4516 |
| 947c9fbe60 |
| 8cc1e67e6c |
| 2f3deac8b0 |
| aa44ba40ab |
| 8b268d646c |
| d6ecadb471 |
| 9a8bde448e |
| ce71b3b6b1 |
| 5fa30bf73a |
| b93aba6126 |
| 51ed80df69 |
| a614c4ac8c |
| 7b777a10a5 |
| bf1ab9951f |
| 5cea2b3855 |
| ab63757fe5 |
| 736ed1e003 |
| 6ee9707fd7 |
| e40835b1a9 |
| 216a420bbc |
| 4845abecb8 |
| 08a910e44f |
| 67ba2c4fe3 |
| 7efa501bdc |
| 879a694ab3 |
| 4e3b1881ed |
| 35d3707de7 |
| 1f468bd3f5 |
| ac32098c86 |
| e0af005c42 |
| f08af1bdbf |
| 6d0420fde5 |
| 5172e6e362 |
| 42df0f70b6 |
| dace0f6a2d |
| 26a5878181 |
| db6474a3e4 |
| b84851fd0d |
| 673702c100 |
| 520eb6baca |
| e6047dc344 |
| d86a452b15 |
| 67f9d0b9c4 |
| 21cd055b84 |
| 9f3bb623ec |
| b10a95097e |
| 4561f5cacb |
| 50b672a4db |
| ffbb73a59b |
| 649974f14d |
| 9ec0bc0734 |
| 9649e49658 |
| 49fdcb7347 |
| cd6ee956ed |
| ef95fd33f8 |
| 1a488241b0 |
| 5fdd3a3d66 |
| b6a32c050f |
| 055e225093 |
| 144218cb1b |
| 13b575a609 |
| b5a414eae9 |
| b94b347ace |
| f5ee225819 |
| 9cb48be14f |
| 85fa9951eb |
| ec72575fc9 |
| d9d1bb6d3d |
| ffcdc26618 |
| 96981a07b9 |
| 6b2721b239 |
| c79151a574 |
| 4b20234801 |
| 911048aa6d |
| 255e9693ee |
| 61c1216e3d |
| 17e1eaf0f3 |
| 9940943595 |
| 9a0f941870 |
| 5d0f54d332 |
| d602c94b7b |
| 6a5ecbd68f |
| 29dfcab505 |
| 16e5c903cc |
| 66682cb4e5 |
| 52faea8b7d |
| 8a78315682 |
| cab42a4ae3 |
| a5bdd42bdd |
| a26197f919 |
| 8b9cab457e |
| 080ce31395 |
| 7866e8a196 |
| d5d17e00b3 |
| 9c6a1331cf |
| d89c97634c |
| 7e95ca3705 |
| abd46b01b7 |
| 8629ac8417 |
| 304925aabf |
| 16d93e47a5 |
| 6dcb2bbf0d |
| deb138959a |
| 45e6f3bd00 |
| 55a9e0d51a |
| 3ddae600fb |
| 861ede8945 |
| 93f11f9047 |
| 56503110dd |
| f67d35dffd |
| efbca1b5b7 |
| 2de0ebaf8d |
| 0815ef94a3 |
| 092ffa99e5 |
| b05b67b264 |
| a5c6518c20 |
| da048395ce |
| f31f7be310 |
| e1a2267f86 |
| 3c9e4ee7f7 |
| 9ba32c9acd |
| d23008452e |
| f397cba1e0 |
| 3eecbb5b1a |
| 1583e93b48 |
| 849457df81 |
| 903cab75ee |
| ee108d4aff |
| 49bcc58762 |
| a08baf4a14 |
| 8c56dfdd46 |
| dcdd9af9db |
| a464cf5c60 |
| cc55c754dc |
| 2d483ab09f |
| d64e10a337 |
| 1e9ee10674 |
| 3ac395b39e |
| 6e26a6f128 |
| b512b92a8a |
| 5ff601a1b9 |
| 5823054519 |
| 3d196662bc |
| b0601580ef |
| c1f29ea651 |
| 881d1d435a |
| d1aae0c941 |
| 468cc23876 |
| d9646a9183 |
| 279cee42f1 |
| 57bdb907cc |
| 15d683c78f |
| bf6c8ced7d |
| 78fb685027 |
| a87536eba0 |
| 3f05395a00 |
| 85fc57d41e |
| 1e5976d5ce |
| 98c0b23350 |
| 039a0fffba |
| 90ec640e7a |
| 10acd31d25 |
| 4224014fad |
| df1e8b33d8 |
| cdb4ee42cc |
| d29baec77e |
| 53c189da9b |
| 277fbce61b |
| 0adc54b7ff |
| 1cbd7e9888 |
| 0a9e1658dd |
| 31d4a4cd11 |
| fbc4e73d31 |
| c1d4eaa79d |
| 760af6428e |
| dfa0ccf626 |
| 7a142cf324 |
| 1a51fdbd58 |
| 368a99ec8d |
| 1c7e734918 |
| 764d1325bf |
| 0cf30e9022 |
| 227b20f368 |
| d7d70bc25b |
| 82f6ddb693 |
| 9e4e82d2c5 |
| 9838369fe9 |
| 6085ad1bfa |
| d3851b27df |
| d6100dfdcb |
| c2144dac86 |
| a47ff569a8 |
| f8be022ef2 |
| 4f39e6b685 |
| c67b000633 |
| 02f7443586 |
| 6275e7df4e |
| 1b6b52fda1 |
| 5fa1fd84b9 |
| bd0c9f9e8d |
| 2532bb370c |
| 12efc6c2c1 |
| a6cc9ac9c5 |
| 031f5845a2 |
| b88559726c |
| ca6ddf4490 |
| 3ebb2fce94 |
| 62f6b07cba |
| f956f1ed6e |
| 1c0fa95053 |
| 04bf4a1060 |
| ae276fd371 |
| 104bdaed12 |
| 089a5d6ac2 |
| 16b0820193 |
| 4b02267e96 |
| 746584c453 |
| b56daaaca2 |
| b7a6fe88ee |
| 22d1c37b92 |
| 78a393f825 |
| ac8290c1bf |
| 5d0662b415 |
| 931e5e10c3 |
| c172f838b1 |
| c07ae29cd9 |
| 214c9bfd8b |
| 716140d64d |
| 088cb4ef59 |
| fa33e93a8e |
| d1472fc351 |
| 5c8c0c31d8 |
| 7f3c00c7a2 |
| c180dab791 |
| f24acc21c7 |
| 40b637849d |
| e7db1685df |
| eccbfd1011 |
| 90211f6769 |
| edc32ac18e |
| fe68e020e3 |
| 81e1e3544d |
| 09372d5c35 |
| 078a89e4ca |
| dbc6ae26a6 |
| b6f429867a |
| 09f50660ce |
| 189825b495 |
| 441cad58d4 |
| 1277d08f9e |
| e03de47db7 |
| 764b7ff610 |
| 307be7694e |
| c76ae1ef39 |
| d499db7f0e |
| a894b9f29a |
| 902e6b3905 |
| ed2d1c7bf9 |
| 14b73cbd47 |
| a39c7aa864 |
| 170bc9c8ec |
| 365c01fc29 |
| 3124785a08 |
| 60e6306107 |
| 42ccb7830a |
| 0bb03b9292 |
| ed6fbf1480 |
| 477cec6021 |
| 924500d111 |
| 0677504ef1 |
| ca2a7c4d9c |
| 28606629ad |
| c817279464 |
| 009d6ed8ed |
| 5cec1282a9 |
| 340170fd29 |
| 7ed0cc139a |
| 2c822213eb |
| 0894b9591c |
| f0ca45f9a2 |
| afc48c6485 |
| 93dce8a0cb |
| 149ccdaf39 |
| c08bb39ffe |
| 5083d8ab34 |
| 7552a5dd07 |
| c93d68f853 |
| 2b74db2dce |
| cc6c91415d |
| 6d7d7e0adc |
| 2105d777f0 |
| 14338afbdb |
| 3e8aa4023d |
| b443875e66 |
@@ -43,12 +43,4 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp

# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh

# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh

.github/workflows/go.yml (14 changes, vendored)

@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.20
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.19
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2

- name: Set up Go 1.20
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.19
id: go

- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.52.2
version: v1.50.1
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: '1.20'
go-version: 1.19
id: go

- name: Check out code into the Go module directory

@@ -17,15 +17,11 @@ linters:
- errcheck
- gosimple
- gocognit
- dupword
- nilerr
- whitespace
- misspell

linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65
min-complexity: 69

output:
print-issued-lines: true

@@ -2,16 +2,16 @@

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[](https://discord.gg/prysmaticlabs)
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
[](https://discord.gg/CTYGPUJ)
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

### Getting Started

A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).

### Staking on Mainnet

WORKSPACE (18 changes)

@@ -86,10 +86,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
],
)

@@ -164,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
go_version = "1.20.3",
go_version = "1.19.7",
nogo = "@//:nogo",
)

@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.3.0-rc.5"

bls_test_version = "v0.1.1"

@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "266006512e71e62396e8f31be01639560c9d59a93c38220fd8f51fabefc8f5f3",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "2ebf483830165909cb7961562fd369dedf079997a4832cc215a543898a73aa46",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "333718ba5c907e0a99580caa8d28dd710543b3b271e4251581006d0e101fbce9",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "78b6925b5a4208e32385fa4387d2c27b381a8ddd18d66d5a7787e7846b86bfc8",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -126,7 +126,7 @@ type WeakSubjectivityData struct {
}

// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
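The `%#x:%d` verb pair in the hunk above renders the root as 0x-prefixed hex and the epoch as a decimal. A minimal runnable sketch of the same format, using a hypothetical stand-in struct rather than Prysm's `WeakSubjectivityData`:

```go
package main

import "fmt"

// checkpoint is a stand-in for WeakSubjectivityData:
// a 32-byte block root plus an epoch number.
type checkpoint struct {
	BlockRoot [32]byte
	Epoch     uint64
}

// String renders "<0x-prefixed hex root>:<decimal epoch>", matching the
// fmt.Sprintf("%#x:%d", ...) call in the hunk above.
func (c checkpoint) String() string {
	return fmt.Sprintf("%#x:%d", c.BlockRoot, c.Epoch)
}

func main() {
	c := checkpoint{Epoch: 74888}
	c.BlockRoot[0] = 0x1c
	fmt.Println(c) // prints 0x1c00...00:74888 (32 bytes of hex, then the epoch)
}
```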
@@ -483,7 +483,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)

@@ -11,12 +11,10 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",

@@ -3,13 +3,10 @@ package builder
import (
"math/big"

"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)

@@ -41,7 +38,7 @@ type signedBuilderBid struct {
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
w := signedBuilderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}

@@ -74,7 +71,7 @@ type signedBuilderBidCapella struct {
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
w := signedBuilderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}

@@ -107,7 +104,7 @@ type builderBid struct {
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
w := builderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}

@@ -155,19 +152,16 @@ type builderBidCapella struct {
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
w := builderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
return nil, blocks.ErrNilObjectWrapped
}
return w, nil
}

// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
v := bytesutil.ReverseByteOrder(b.p.Value)
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, big.NewInt(0).SetBytes(v))
}

// Version --
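For context on the `ReverseByteOrder` call above: the bid value arrives as little-endian bytes from the execution layer, while Go's `big.Int.SetBytes` expects big-endian, so the bytes are reversed before parsing; the Wei-to-Gwei step is then an integer division by 10^9. A hedged sketch of that conversion — the helpers here are local stand-ins for `bytesutil.ReverseByteOrder` and `math.WeiToGwei`, not Prysm's implementations:

```go
package main

import (
	"fmt"
	"math/big"
)

// reverseByteOrder returns a reversed copy of b; a stand-in for
// bytesutil.ReverseByteOrder in the diff above.
func reverseByteOrder(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

// weiToGwei divides a Wei amount by 10^9; a stand-in for math.WeiToGwei.
func weiToGwei(wei *big.Int) *big.Int {
	return new(big.Int).Quo(wei, big.NewInt(1_000_000_000))
}

func main() {
	// 1 Gwei (10^9 Wei = 0x3b9aca00) encoded least-significant byte first,
	// the way a builder bid value might arrive.
	le := []byte{0x00, 0xca, 0x9a, 0x3b}
	wei := new(big.Int).SetBytes(reverseByteOrder(le))
	fmt.Println(wei, weiToGwei(wei)) // 1000000000 1
}
```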
@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -156,7 +157,6 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
if err != nil {
return
}
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
}
@@ -245,6 +245,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}

}

// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
@@ -306,9 +307,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
@@ -339,14 +337,11 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, 0)
return blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}

@@ -313,26 +313,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)

@@ -346,74 +346,6 @@ func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
}, nil
}

// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
return ExecutionPayload{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
}, nil
}

// FromProtoCapella converts a proto execution payload type for capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: w.Address,
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}

type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
@@ -70,16 +70,15 @@ type Gateway struct {
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
g := &Gateway{
ctx: ctx,
cfg: &config{},
cfg: &config{
router: mux.NewRouter(),
},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
g.cfg.router = mux.NewRouter()
}
return g, nil
}

@@ -10,6 +10,11 @@ import (

type Option func(g *Gateway) error

func (g *Gateway) SetRouter(r *mux.Router) *Gateway {
g.cfg.router = r
return g
}

func WithPbHandlers(handlers []*PbMux) Option {
return func(g *Gateway) error {
g.cfg.pbHandlers = handlers
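The hunks above follow Go's functional-options pattern: `New` applies each `Option` in turn and then fills in a default for anything left unset (here, the router). A minimal sketch of that construction order with placeholder types — the fields and option below are illustrative, not the gateway package's actual API:

```go
package main

import "fmt"

type config struct{ router string }

type Gateway struct{ cfg *config }

// Option mutates a Gateway during construction and may fail.
type Option func(g *Gateway) error

// WithRouter is a hypothetical option that installs a custom router.
func WithRouter(r string) Option {
	return func(g *Gateway) error {
		g.cfg.router = r
		return nil
	}
}

// New applies options first, then defaults the router only if no option
// set it — the ordering the gateway.New hunk above settles on.
func New(opts ...Option) (*Gateway, error) {
	g := &Gateway{cfg: &config{}}
	for _, opt := range opts {
		if err := opt(g); err != nil {
			return nil, err
		}
	}
	if g.cfg.router == "" {
		g.cfg.router = "default"
	}
	return g, nil
}

func main() {
	g, _ := New(WithRouter("custom"))
	fmt.Println(g.cfg.router) // custom
}
```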
@@ -14,7 +14,7 @@ type WorkerResults struct {

// Scatter scatters a computation across multiple goroutines.
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Results returned are collected and presented a a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {
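Going by the signature shown above, `sFunc` receives a chunk's offset and entry count plus a shared lock, and each chunk's return value comes back wrapped in a `WorkerResults`. A simplified, self-contained model of that scatter/gather shape and a summing usage — the `offset`/`extent` field names and the chunking policy here are assumptions, not the real package's:

```go
package main

import (
	"fmt"
	"sync"
)

// workerResult mirrors the idea of WorkerResults: which chunk (offset)
// produced which value (extent). Field names are illustrative only.
type workerResult struct {
	offset int
	extent interface{}
}

// scatter splits [0, inputLen) into chunks and runs sFunc on each chunk
// concurrently, modeling the Scatter signature in the diff above.
func scatter(inputLen, chunks int, sFunc func(offset, entries int, mu *sync.RWMutex) (interface{}, error)) ([]workerResult, error) {
	var (
		mu       sync.RWMutex // shared lock handed to every worker
		wg       sync.WaitGroup
		resMu    sync.Mutex
		results  []workerResult
		firstErr error
	)
	chunkSize := (inputLen + chunks - 1) / chunks
	for offset := 0; offset < inputLen; offset += chunkSize {
		entries := chunkSize
		if offset+entries > inputLen {
			entries = inputLen - offset
		}
		wg.Add(1)
		go func(offset, entries int) {
			defer wg.Done()
			v, err := sFunc(offset, entries, &mu)
			resMu.Lock()
			defer resMu.Unlock()
			if err != nil && firstErr == nil {
				firstErr = err // errors propagate back to the caller
			}
			results = append(results, workerResult{offset: offset, extent: v})
		}(offset, entries)
	}
	wg.Wait()
	return results, firstErr
}

func main() {
	values := []int{1, 2, 3, 4, 5, 6, 7, 8}
	results, err := scatter(len(values), 4, func(offset, entries int, _ *sync.RWMutex) (interface{}, error) {
		sum := 0
		for _, v := range values[offset : offset+entries] {
			sum += v
		}
		return sum, nil
	})
	if err != nil {
		panic(err)
	}
	total := 0
	for _, r := range results {
		total += r.extent.(int) // reassemble the per-chunk results
	}
	fmt.Println(total) // 36
}
```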
@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"chain_info_forkchoice.go",
"error.go",
"execution_engine.go",
"forkchoice_update_execution.go",
@@ -58,13 +57,11 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
@@ -120,7 +117,6 @@ go_test(
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -170,7 +166,6 @@ go_test(
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
"setup_test.go",
],
embed = [":go_default_library"],
gc_goopts = [

@@ -7,15 +7,14 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
@@ -38,16 +37,12 @@ type ChainInfoFetcher interface {
// of locking forkchoice
type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
}

// HeadUpdater defines a common interface for methods in blockchain service
// which allow to update the head info
type HeadUpdater interface {
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -81,17 +76,12 @@ type HeadFetcher interface {

// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
ForkChoicer() forkchoice.ForkChoicer
CurrentFork() *ethpb.Fork
GenesisFetcher
TimeFetcher
}

// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}

// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
@@ -103,8 +93,6 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
UnrealizedJustifiedPayloadBlockHash() [32]byte
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
@@ -117,25 +105,25 @@ type OptimisticModeFetcher interface {

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
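The checkpoint accessors above all share one shape: take the fork-choice store's read lock, read the value, and return a defensive copy so callers cannot mutate shared state after the lock is released. A minimal sketch of that pattern with stand-in types (not Prysm's):

```go
package main

import (
	"fmt"
	"sync"
)

// checkpoint is a stand-in for the ethpb.Checkpoint copied out above.
type checkpoint struct {
	Epoch uint64
	Root  []byte
}

type store struct {
	mu        sync.RWMutex
	finalized checkpoint
}

// FinalizedCheckpt mirrors the accessor shape in the diff: RLock, read,
// and return a copy — including a copied root slice — rather than the original.
func (s *store) FinalizedCheckpt() checkpoint {
	s.mu.RLock()
	defer s.mu.RUnlock()
	rootCopy := make([]byte, len(s.finalized.Root))
	copy(rootCopy, s.finalized.Root) // analogous to bytesutil.SafeCopyBytes
	return checkpoint{Epoch: s.finalized.Epoch, Root: rootCopy}
}

func main() {
	s := &store{finalized: checkpoint{Epoch: 7, Root: []byte{0xaa, 0xbb}}}
	cp := s.FinalizedCheckpt()
	cp.Root[0] = 0x00                        // mutating the copy...
	fmt.Println(s.finalized.Root[0] == 0xaa) // ...leaves the store untouched: true
}
```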
@@ -298,8 +286,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {

// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -309,6 +297,14 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}

// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}

// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
s.headLock.RLock()
@@ -333,8 +329,13 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
return v.PublicKey(), nil
}

// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}

// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
@@ -342,8 +343,8 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
headRoot := s.head.root
s.headLock.RUnlock()

s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
if err == nil {
return optimistic, nil
@@ -359,14 +360,14 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
return true
}
// If node exists in our store, then it is not
// finalized.
if s.cfg.ForkChoiceStore.HasNode(root) {
if s.ForkChoicer().HasNode(root) {
return false
}
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
@@ -376,25 +377,17 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
// This in particular means that the blockroot is a descendant of the
// finalized checkpoint
func (s *Service) InForkchoice(root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasNode(root)
}

// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
return s.ForkChoicer().HasNode(root)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
s.ForkChoicer().RLock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
s.cfg.ForkChoiceStore.RUnlock()
s.ForkChoicer().RUnlock()
if err == nil {
return optimistic, nil
}
@@ -417,10 +410,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
}

if ss == nil {
ss, err = s.recoverStateSummary(ctx, root)
if err != nil {
return true, err
}
return true, errInvalidNilSummary
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
@@ -446,10 +436,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}
if lastValidated == nil {
lastValidated, err = s.recoverStateSummary(ctx, root)
if err != nil {
return false, err
}
return false, errInvalidNilSummary
}

if ss.Slot > lastValidated.Slot {
@@ -478,9 +465,9 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is most optimal outcome.
s.cfg.ForkChoiceStore.RLock()
s.ForkChoicer().RLock()
ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
s.cfg.ForkChoiceStore.RUnlock()
s.ForkChoicer().RUnlock()
if err != nil {
// Try getting ancestor root from DB when failed to retrieve from fork choice store.
// This is the second line of defense for retrieving ancestor root.
@@ -498,17 +485,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}

func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, err
}
summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
return nil, err
}
return summary, nil
}
return nil, errBlockDoesNotExist
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
@@ -1,94 +0,0 @@
package blockchain

import (
"context"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)

// CachedHeadRoot returns the corresponding value from Forkchoice
func (s *Service) CachedHeadRoot() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.CachedHeadRoot()
}

// GetProposerHead returns the corresponding value from forkchoice
func (s *Service) GetProposerHead() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.GetProposerHead()
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}

// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}

// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
}

// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}

// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
}

// NewSlot returns the corresponding value from forkchoice
func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
}

// ProposerBoost wraps the corresponding method from forkchoice
func (s *Service) ProposerBoost() [32]byte {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.ProposerBoost()
}

// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}

// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}

// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
@@ -10,6 +10,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -70,9 +71,23 @@ func TestHeadRoot_Nil(t *testing.T) {
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}

func TestService_ForkChoiceStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
p := c.ForkChoiceStore()
require.Equal(t, primitives.Epoch(0), p.FinalizedCheckpoint().Epoch)
}

func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -88,8 +103,16 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
}

func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
@@ -102,37 +125,6 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
}

func TestFinalizedBlockHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, r))

require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
h := service.FinalizedBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
}

func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := context.Background()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))

h := service.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
}

func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
@@ -145,9 +137,16 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
}

func TestHeadRoot_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

@@ -157,8 +156,16 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
}

func TestHeadRoot_UseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
@@ -477,10 +484,15 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)

require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)

require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
@@ -542,31 +554,12 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {

}

func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
_, err = c.IsOptimisticForRoot(ctx, br)
assert.NoError(t, err)
summ, err := beaconDB.StateSummary(ctx, br)
assert.NoError(t, err)
assert.NotNil(t, summ)
assert.Equal(t, 10, int(summ.Slot))
assert.DeepEqual(t, br[:], summ.Root)
}

func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
r1 := [32]byte{'a'}
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
require.NoError(t, c.ForkChoiceStore().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,
}))
b := util.NewBeaconBlock()
@@ -15,8 +15,8 @@ var (
errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
@@ -25,11 +25,8 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)

// An invalid block is the block that fails state transition based on the core protocol rules.
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -61,8 +60,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
log.WithError(err).Error("Could not get execution payload for head block")
|
||||
return nil, nil
|
||||
}
|
||||
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
|
||||
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
|
||||
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
|
||||
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash(),
|
||||
SafeBlockHash: justifiedHash[:],
|
||||
@@ -70,7 +69,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
}
|
||||
|
||||
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
|
||||
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
|
||||
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
|
||||
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
|
||||
if err != nil {
|
||||
@@ -89,7 +88,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
if len(lastValidHash) == 0 {
|
||||
lastValidHash = defaultLatestValidHash
|
||||
}
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not set head root to invalid")
|
||||
return nil, nil
|
||||
@@ -225,7 +224,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -251,23 +250,20 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
|
||||
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
if !ok && !features.Get().PrepareAllPayloads { // There's no need to build attribute if there is no proposer for slot.
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, emptyAttri, 0
}

// Get previous randao.
st = st.Copy()
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
@@ -305,7 +301,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,

var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
case version.Capella, version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

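The getPayloadAttribute hunks above change how the state is advanced to the proposal slot, but the overall flow is stable: consult the proposer-ID cache, bail out when no proposer is registered, then advance the state and assemble the attributes. A simplified, hypothetical sketch of that flow (all types and the 12-second slot timestamp are stand-ins, not Prysm's API):

package main

import "fmt"

type Slot uint64

type PayloadAttributes struct {
	Timestamp             uint64
	PrevRandao            [32]byte
	SuggestedFeeRecipient [20]byte
}

// proposerCache stands in for the proposer-slot-index cache.
var proposerCache = map[Slot]uint64{1: 7}

func getPayloadAttribute(slot Slot) (bool, *PayloadAttributes, uint64) {
	proposerID, ok := proposerCache[slot]
	if !ok {
		// No proposer registered for this slot: nothing to build.
		return false, nil, 0
	}
	// In the real code the state is advanced to `slot` first so that the
	// randao mix and expected withdrawals reflect the proposal slot.
	attr := &PayloadAttributes{Timestamp: uint64(slot) * 12}
	return true, attr, proposerID
}

func main() {
	has, attr, id := getPayloadAttribute(1)
	fmt.Println(has, attr.Timestamp, id)
}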
@@ -9,12 +9,14 @@ import (
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
bstate "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -30,16 +32,23 @@ import (
)

func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -86,15 +95,23 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
}

func Test_NotifyForkchoiceUpdate(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -246,8 +263,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}

func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -279,6 +296,12 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)

// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -334,8 +357,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.

func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -390,6 +413,12 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)

// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -467,9 +496,15 @@ func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, fcs := tr.ctx, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -500,6 +535,8 @@ func Test_NotifyNewPayload(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
r, err := bellatrixBlk.Block().HashTreeRoot()
@@ -706,10 +743,14 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)

service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
@@ -722,6 +763,8 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{
@@ -744,11 +787,19 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}

func Test_GetPayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}

// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)

@@ -757,7 +808,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -767,36 +818,26 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}

func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
hook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()

service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
}

func Test_GetPayloadAttributeV2(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}

// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)

@@ -805,7 +846,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -818,7 +859,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
@@ -830,9 +871,18 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fcs)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
@@ -937,8 +987,16 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}

func TestService_removeInvalidBlockAndState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
@@ -982,10 +1040,18 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
}

func TestService_getPayloadHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

_, err := service.getPayloadHash(ctx, []byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)

b := util.NewBeaconBlock()

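Many of the rewritten tests above construct their service through a shared minimalTestService helper instead of repeating the option-list boilerplate. The helper's definition is not part of this listing; a plausible sketch in the blockchain package's own idiom, with the testResources field names inferred from usage like tr.ctx, tr.db and tr.fcs, is:

// Hypothetical sketch only: the real helper may differ in shape and naming.
type testResources struct {
	ctx context.Context
	db  db.Database              // assumed interface name
	fcs forkchoice.ForkChoicer   // assumed interface name
}

func minimalTestService(t *testing.T, extra ...Option) (*Service, *testResources) {
	beaconDB := testDB.SetupDB(t)
	fcs := doublylinkedtree.New()
	// Base options mirror the boilerplate the rewritten tests delete.
	opts := append([]Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB, fcs)),
		WithForkChoiceStore(fcs),
	}, extra...)
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	return s, &testResources{ctx: context.Background(), db: beaconDB, fcs: fcs}
}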
@@ -14,12 +14,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

func (s *Service) isNewProposer(slot primitives.Slot) bool {
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
return ok || features.Get().PrepareAllPayloads
return ok
}

func (s *Service) isNewHead(r [32]byte) bool {
@@ -50,27 +49,21 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
}

// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
// it returns true if the new head is updated
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)

func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) error {
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead {
return false, nil
return nil
}
isNewProposer := s.isNewProposer(proposingSlot)
if isNewProposer && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return false, nil
return nil
}
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return false, nil
return nil
}

_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
@@ -79,7 +72,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
headBlock: headBlock.Block(),
})
if err != nil {
return false, errors.Wrap(err, "could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}

if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
@@ -90,19 +83,19 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return true, nil
return nil
}

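One side of the forkchoiceUpdateWithExecution diff returns (bool, error) and the other plain error; in the single-value form every "nothing to do" path reports nil, and only a failed notification surfaces to the caller. A small self-contained sketch of that early-return structure (names are illustrative, not Prysm's):

package main

import (
	"errors"
	"fmt"
)

// updateIfNewHead mirrors the wrapper's shape: skip paths return nil, so the
// caller only sees an error when the forkchoice notification itself fails.
func updateIfNewHead(isNewHead bool, notify func() error) error {
	if !isNewHead {
		return nil // not a new head: no FCU call needed
	}
	if err := notify(); err != nil {
		return fmt.Errorf("could not notify forkchoice update: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(updateIfNewHead(false, nil))                                    // <nil>
	fmt.Println(updateIfNewHead(true, func() error { return errors.New("x") })) // wrapped error
}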
// shouldOverrideFCU checks whether the incoming block is still subject to being
// reorged or not by the next proposer.
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
headWeight, err := s.ForkChoicer().Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
proposerHead := s.ForkChoicer().GetProposerHead()
if proposerHead != newHeadRoot {
return true
}
@@ -113,7 +106,7 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
params.BeaconConfig().SecondsPerSlot)
lateBlockFailedAttemptSecondThreshold.Inc()
} else {
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
if s.ForkChoicer().ShouldOverrideFCU() {
return true
}
secs, err := slots.SecondsSinceSlotStart(currentSlot,

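shouldOverrideFCU above distinguishes two cases: when the node is about to propose in the current slot, it compares forkchoice's proposer head against the new head; otherwise it asks forkchoice whether an override is warranted and checks how far into the slot we are. A minimal sketch under simplifying assumptions (the 4-second cutoff is an assumption, mainnet's attestation deadline at SECONDS_PER_SLOT/3):

package main

import "fmt"

func shouldOverride(proposingNow, proposerHeadDiffers bool, secsIntoSlot uint64) bool {
	const attDeadline = 4 // seconds; illustrative assumption
	if proposingNow {
		// About to propose this very slot: override unless forkchoice already
		// points at the block we would build on.
		return proposerHeadDiffers
	}
	// Otherwise only consider overriding early in the slot, before attesters vote.
	return secsIntoSlot < attDeadline
}

func main() {
	fmt.Println(shouldOverride(true, true, 0))   // true: reorg the late block
	fmt.Println(shouldOverride(false, false, 6)) // false: too late in the slot
}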
@@ -8,6 +8,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -74,8 +76,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1))
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
@@ -83,8 +84,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1))
require.LogsContain(t, hook, invalidStateErr)

hook.Reset()
@@ -108,8 +108,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)

@@ -126,8 +125,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
@@ -137,21 +135,28 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {

// Test zero headRoot returns immediately.
headRoot := service.headRoot()
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1))
require.Equal(t, service.headRoot(), headRoot)
}

func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -183,17 +188,24 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
service.head.block = sb
service.head.state = st
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
require.NoError(t, err)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1))

}

func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
require.NoError(t, err)
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}

@@ -28,8 +28,6 @@ import (
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
// This function is only used in spec-tests, it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
return errors.Wrap(err, "could not update head")
@@ -54,7 +52,7 @@ type head struct {

// This saves head info to the local service cache, it also saves the
// new head root to the DB.
// Caller of the method MUST acquire a lock on forkchoice.
// Caller of the method MUST aqcuire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -89,25 +87,25 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()

// A chain re-org occurred, so we fire an event notifying the rest of the services.
r, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
oldWeight, err := s.ForkChoicer().Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
}
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
newWeight, err := s.ForkChoicer().Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
@@ -125,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))

isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
isOptimistic, err := s.ForkChoicer().IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
@@ -365,7 +363,7 @@ func (s *Service) notifyNewHeadEvent(
// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
@@ -403,19 +401,6 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
}
saveOrphanedAttCount.Inc()
}
for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
log.WithError(err).Error("Could not insert reorg attester slashing")
}
}
for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
log.WithError(err).Error("Could not insert reorg proposer slashing")
}
}
for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
s.cfg.ExitPool.InsertVoluntaryExit(v)
}
if orphanedBlk.Version() >= version.Capella {
changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
if err != nil {

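The reorg metrics in the saveHead hunk above reduce to simple slot arithmetic: distance counts the slots on both branches since the common ancestor, and depth is the longer of the two branches. A self-contained worked example:

package main

import "fmt"

// reorgMetrics mirrors the two observations saveHead records when the new
// head does not descend from the old one.
func reorgMetrics(oldHeadSlot, newHeadSlot, forkSlot uint64) (distance, depth uint64) {
	distance = oldHeadSlot + newHeadSlot - 2*forkSlot
	depth = oldHeadSlot - forkSlot
	if d := newHeadSlot - forkSlot; d > depth {
		depth = d
	}
	return distance, depth
}

func main() {
	// Old head at slot 10, new head at slot 11, common ancestor at slot 8:
	// distance = 10 + 11 - 16 = 5 slots, depth = max(2, 3) = 3 slots.
	dis, dep := reorgMetrics(10, 11, 8)
	fmt.Println(dis, dep)
}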
@@ -53,7 +53,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim

// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.

@@ -9,8 +9,10 @@ import (

mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -306,7 +308,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}

@@ -324,88 +326,6 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.DeepEqual(t, wantAtts, atts)
}

func TestSaveOrphanedOps(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
service.head = &head{state: st}
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)

util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)

blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)

blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)

blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkConfig.NumProposerSlashings = 1
blkConfig.NumAttesterSlashings = 1
blkConfig.NumVoluntaryExits = 1
blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)

blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}

require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
exits, err := service.cfg.ExitPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(exits))
}

func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -453,7 +373,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}

@@ -511,7 +431,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}

@@ -570,7 +490,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}

@@ -579,9 +499,18 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
}

func TestUpdateHead_noSavedChanges(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()

beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}

service, err := NewService(ctx, opts...)
require.NoError(t, err)
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)

@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
consensusBlocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
case err != nil:
return err
default:
@@ -61,6 +61,13 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
k, err := b.Body().BlobKzgCommitments()
if err != nil {
return err
}
log = log.WithField("blobCount", len(k))
}
log.Info("Finished applying state transition")
return nil
}
@@ -95,6 +102,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"version": version.String(block.Version()),
}).Info("Synced new block")
}
return nil

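The logStateTransitionData hunk above gates the new blobCount field on the block version, since only Deneb and later blocks carry blob KZG commitments. A self-contained sketch of that version-gated logging, with stand-in types in place of Prysm's block interfaces:

package main

import "fmt"

const versionDeneb = 4 // stand-in fork ordinal for illustration

type block interface {
	Version() int
	BlobKzgCommitments() ([][]byte, error)
}

// logFields attaches blobCount only when the block's fork supports blobs,
// mirroring the conditional in the hunk above.
func logFields(b block) (map[string]interface{}, error) {
	fields := map[string]interface{}{}
	if b.Version() >= versionDeneb {
		k, err := b.BlobKzgCommitments()
		if err != nil {
			return nil, err
		}
		fields["blobCount"] = len(k)
	}
	return fields, nil
}

type denebBlock struct{ commitments [][]byte }

func (d denebBlock) Version() int                          { return versionDeneb }
func (d denebBlock) BlobKzgCommitments() ([][]byte, error) { return d.commitments, nil }

func main() {
	f, _ := logFields(denebBlock{commitments: make([][]byte, 2)})
	fmt.Println(f) // map[blobCount:2]
}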
@@ -5,19 +5,16 @@ import (

testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
)

func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithClockSynchronizer(cs),
}
}

@@ -25,6 +22,5 @@ func testServiceOptsWithDB(t *testing.T) []Option {
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
return []Option{}
}

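The hunks above touch the WithClockSynchronizer option; in either direction, the service is assembled through functional options. A minimal self-contained sketch of that pattern (names simplified, not Prysm's actual fields):

package main

import "fmt"

type Service struct{ db, forkchoice string }

// Option mutates a Service during construction and may fail.
type Option func(*Service) error

func WithDB(name string) Option {
	return func(s *Service) error { s.db = name; return nil }
}

func WithForkchoice(name string) Option {
	return func(s *Service) error { s.forkchoice = name; return nil }
}

// NewService applies each option in order, stopping at the first error.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := NewService(WithDB("bolt"), WithForkchoice("doubly-linked-tree"))
	fmt.Println(s.db, s.forkchoice)
}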
@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -164,11 +163,3 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return nil
}
}

func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return func(s *Service) error {
s.clockSetter = gs
s.clockWaiter = gs
return nil
}
}

@@ -1,13 +1,17 @@
package blockchain

import (
"context"
"fmt"
"math/big"
"testing"

gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -104,8 +108,16 @@ func Test_validateMergeBlock(t *testing.T) {
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)

service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -146,8 +158,16 @@ func Test_validateMergeBlock(t *testing.T) {
}

func Test_getBlkParentHashAndTD(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -219,9 +239,14 @@ func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, ok)

service, tr := minimalTestService(t)
ctx := tr.ctx

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.SetSlot(1)

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -19,17 +18,7 @@ import (
)

// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -43,36 +32,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// Try the next slot cache for the early epoch calls, this should mostly have been covered already
// but is cheap
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() != slot {
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return cachedState, nil
}

// Do not process attestations for old non viable checkpoints otherwise
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}

// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
@@ -95,6 +55,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return baseState, nil

}

// verifyAttTargetEpoch validates attestation is from the current or previous epoch.

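getAttPreState above resolves a checkpoint's pre-state through a sequence of fallbacks: the checkpoint-state cache first, then (on one side of the diff) the next-slot cache with slot processing, and finally regeneration through stategen. A simplified, self-contained sketch of that lookup order, with stand-in types in place of Prysm's caches:

package main

import (
	"errors"
	"fmt"
)

type state struct{ slot uint64 }

type service struct {
	checkpointCache map[string]*state
	nextSlotCache   map[string]*state
	regenerate      func(root string) (*state, error)
}

// attPreState tries the cheap caches before falling back to regeneration,
// caching whatever it resolves so repeat lookups are fast.
func (s *service) attPreState(root string, epochStartSlot uint64) (*state, error) {
	if st, ok := s.checkpointCache[root]; ok {
		return st, nil
	}
	if st, ok := s.nextSlotCache[root]; ok {
		if st.slot < epochStartSlot {
			st = &state{slot: epochStartSlot} // stand-in for ProcessSlots
		}
		s.checkpointCache[root] = st
		return st, nil
	}
	st, err := s.regenerate(root)
	if err != nil {
		return nil, errors.New("could not get pre state: " + err.Error())
	}
	s.checkpointCache[root] = st
	return st, nil
}

func main() {
	s := &service{
		checkpointCache: map[string]*state{},
		nextSlotCache:   map[string]*state{"a": {slot: 31}},
		regenerate:      func(string) (*state, error) { return &state{slot: 32}, nil },
	}
	st, _ := s.attPreState("a", 32)
	fmt.Println(st.slot) // 32
}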
@@ -6,6 +6,9 @@ import (
"time"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -18,29 +21,29 @@ import (
)

func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

_, err := blockTree1(t, beaconDB, []byte{'g'})
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)

blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)

cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -51,7 +54,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 32
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)

blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -66,10 +69,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

service.head = &head{
state: st,
}

tests := []struct {
name string
a *ethpb.Attestation
@@ -80,6 +79,11 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -124,9 +128,17 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}

func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -146,8 +158,15 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
}

func TestStore_SaveCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -168,9 +187,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))

st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -178,17 +194,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))

s2, err := service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)

st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)

assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

s1, err = service.getAttPreState(ctx, cp1)
@@ -207,30 +214,26 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
}

func TestStore_UpdateCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
||||
@@ -240,16 +243,8 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||
|
||||
epoch = 2
|
||||
blk = util.NewBeaconBlock()
|
||||
blk.Block.Slot = 64
|
||||
r2, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: r2[:]}
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
s, err := slots.EpochStart(newCheckpoint.Epoch)
|
||||
@@ -328,22 +323,3 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
|
||||
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
|
||||
}
|
||||
|
||||
func TestGetAttPreState_HeadState(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
baseState, _ := util.DeterministicGenesisState(t, 1)
|
||||
|
||||
epoch := primitives.Epoch(1)
|
||||
blk := util.NewBeaconBlock()
|
||||
r1, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: r1[:]}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
|
||||
_, err = service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
|
||||
}
|
||||
|
||||
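The checkpoint-state assertions above all reduce to one piece of slot arithmetic: getAttPreState must hand back a state advanced to the first slot of the checkpoint's epoch. A minimal standalone sketch of that relation (the 32 slots-per-epoch constant is the mainnet default, assumed here purely for illustration):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet params.BeaconConfig().SlotsPerEpoch, assumed

// epochStart mirrors what slots.EpochStart computes: the first slot of an epoch.
func epochStart(epoch uint64) uint64 {
	return epoch * slotsPerEpoch
}

func main() {
	// The tests expect s1.Slot() == 32 for epoch 1 and s2.Slot() == 64 for epoch 2.
	fmt.Println(epochStart(1), epochStart(2)) // 32 64
}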
@@ -6,6 +6,7 @@ import (
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -107,13 +108,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}

// Verify that the parent block is in forkchoice
if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
if !s.ForkChoicer().HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}

// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch

@@ -141,7 +142,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
return err
}
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
@@ -187,18 +187,18 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}()
}

justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := s.ForkChoicer().JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
if blockRoot != headRoot {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
receivedWeight, err := s.ForkChoicer().Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
headWeight, err := s.ForkChoicer().Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
}
@@ -208,24 +208,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

// verify conditions for FCU, notifies FCU, and saves the new head.
// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
return err
}
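The background goroutine in the hunk above is a pattern worth noting: the parent context is deliberately ignored so that cancellation of the block-import path cannot abort the next-slot cache refresh, and a fresh deadline derived from context.Background() bounds the work instead. A self-contained sketch of the same pattern (the 4-second deadline and the work function are stand-ins for slotDeadline and transition.UpdateNextSlotCache):

package main

import (
	"context"
	"log"
	"time"
)

// updateCache stands in for transition.UpdateNextSlotCache.
func updateCache(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// New context with its own deadline: the caller's cancellation
		// does not propagate to this background update.
		slotCtx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
		defer cancel()
		if err := updateCache(slotCtx); err != nil {
			log.Printf("could not update next slot state cache: %v", err)
		}
	}()
	<-done
}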
@@ -240,6 +228,18 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
},
})

// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()

// Save justified check point to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
@@ -252,7 +252,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB

// Save finalized check point to db and more.
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
@@ -283,6 +283,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()

}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
@@ -575,7 +576,7 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
for _, index := range indices {
s.cfg.ForkChoiceStore.InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
s.ForkChoicer().InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
}
}
}
@@ -651,21 +652,30 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion

// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) spawnLateBlockTasksLoop() {
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}

attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
if err := s.fillMissingBlockPayloadId(ctx); err != nil {
log.WithError(err).Error("Could not fill missing payload ID")
}

case <-s.ctx.Done():
case <-ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
@@ -673,13 +683,11 @@ func (s *Service) spawnLateBlockTasksLoop() {
}()
}
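The ticker in this routine fires a fixed offset into every slot: one third of SecondsPerSlot, the same threshold used for attesting. A rough standalone equivalent of what slots.NewSlotTickerWithOffset provides, built only on the standard library (the 12-second slot length is the mainnet default and an assumption here):

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // assumed mainnet value

// untilNextOffsetTick returns the wait until the next "offset seconds into
// a slot" boundary, measured against the genesis time.
func untilNextOffsetTick(genesis, now time.Time, offset time.Duration) time.Duration {
	slotLen := secondsPerSlot * time.Second
	elapsedInSlot := now.Sub(genesis) % slotLen
	if elapsedInSlot < offset {
		return offset - elapsedInSlot
	}
	return slotLen - elapsedInSlot + offset
}

func main() {
	genesis := time.Now().Add(-30 * time.Second)
	offset := (secondsPerSlot / 3) * time.Second // the attestation threshold
	fmt.Printf("next tick in %v\n", untilNextOffsetTick(genesis, time.Now(), offset).Round(time.Millisecond))
}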
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
// it also updates the next slot cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
// fillMissingBlockPayloadId is called 4 seconds into the slot and calls FCU if we are proposing next slot
// and the cache has been missed
func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
if s.CurrentSlot() == s.HeadSlot() {
return
return nil
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.MissedSlot,
@@ -688,32 +696,22 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
if !has || id != [8]byte{} {
return nil
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
return err
}
headRoot := s.headRoot()
headState := s.headState(ctx)
headRoot := s.headRoot()
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
return err
}
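The early return is the heart of fillMissingBlockPayloadId: proceed only when a proposer is registered for the next slot and its cached payload ID is still the zero value; anything else means there is nothing to fill. A toy version of that guard (keying the cache by slot alone is a hypothetical simplification of ProposerSlotIndexCache, which also keys on head root):

package main

import "fmt"

// payloadIDs is a hypothetical stand-in for the proposer payload ID cache.
type payloadIDs map[uint64][8]byte

// needsFCU reports whether forkchoice-updated must be re-sent with payload
// attributes: a proposer exists for the slot but no payload ID is cached yet.
func needsFCU(c payloadIDs, nextSlot uint64) bool {
	id, has := c[nextSlot]
	return has && id == [8]byte{}
}

func main() {
	c := payloadIDs{101: {}}
	fmt.Println(needsFCU(c, 101)) // true: proposer known, ID still missing
	fmt.Println(needsFCU(c, 102)) // false: no proposer registered for the slot
}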
@@ -64,7 +64,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
parentRoot := b.ParentRoot()
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// subset of the block object.
// a subset of the block object.
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}
@@ -85,7 +85,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := s.ForkChoicer().FinalizedCheckpoint()
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -177,7 +177,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

// Fork choice only matters from last finalized slot.
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := s.ForkChoicer().FinalizedCheckpoint()
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -203,7 +203,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
return ErrNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
@@ -230,9 +230,7 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// to be included(rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
eth1DepositIndex -= 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
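The decrement in insertFinalizedDeposits encodes an off-by-one worth spelling out: the state's eth1 deposit index names the next deposit to be included, while the deposit cache finalizes up to the last included one. A one-liner showing the conversion (the count is hypothetical):

package main

import "fmt"

func main() {
	eth1DepositIndex := uint64(7) // "next to include", as tracked in the state
	lastIncluded := int64(eth1DepositIndex) - 1
	fmt.Println(lastIncluded) // 6 is what InsertFinalizedDeposits receives
}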
@@ -12,7 +12,9 @@ import (
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
@@ -22,8 +24,9 @@ import (
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -42,9 +45,18 @@ import (
)

func TestStore_OnBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ctx := context.Background()

beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}

service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
@@ -127,7 +139,7 @@ func TestStore_OnBlock(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fRoot := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
@@ -139,8 +151,17 @@ func TestStore_OnBlock(t *testing.T) {
}

func TestStore_OnBlockBatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -163,7 +184,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
@@ -174,9 +195,17 @@ func TestStore_OnBlockBatch(t *testing.T) {
}

func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
@@ -197,12 +226,22 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
}

func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -220,8 +259,16 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()

st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -241,9 +288,9 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
@@ -261,8 +308,16 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
}
func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()

st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -284,9 +339,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
@@ -304,8 +359,16 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()

var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -343,7 +406,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
beaconState, _ := util.DeterministicGenesisState(t, 32)

// Set finalized epoch to 2.
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
@@ -354,8 +417,17 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()

var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -493,8 +565,17 @@ func TestAncestorByDB_CtxErr(t *testing.T) {
}

func TestAncestor_HandleSkipSlot(t *testing.T) {
service, tr := minimalTestService(t)
beaconDB := tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -575,8 +656,17 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
}

func TestAncestor_CanUseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -641,8 +731,21 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
}

func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -678,8 +781,21 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
}
func TestOnBlock_CanFinalize(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -713,15 +829,39 @@ func TestOnBlock_CanFinalize(t *testing.T) {
}

func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

err := service.onBlock(tr.ctx, nil, [32]byte{})
err = service.onBlock(ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -744,8 +884,21 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -764,8 +917,13 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
}

func TestInsertFinalizedDeposits(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -793,8 +951,13 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}

func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -817,7 +980,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits before hand.
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
depositCache.InsertFinalizedDeposits(ctx, 2)

assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
@@ -921,8 +1084,18 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash
params.OverrideBeaconConfig(cfg)

service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

aHash := common.BytesToHash([]byte("a"))
bHash := common.BytesToHash([]byte("b"))
@@ -1049,8 +1222,17 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
}

func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

beaconState, privKeys := util.DeterministicGenesisState(t, 100)
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
@@ -1091,8 +1273,21 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -1157,9 +1352,18 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
}

func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
wb, err := consensusblocks.NewBeaconBlock(blk)
require.NoError(t, err)
@@ -1181,9 +1385,22 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1242,7 +1459,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
jc := service.ForkChoicer().JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)

// import a block that justifies the second epoch
@@ -1257,14 +1474,14 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
jc = service.ForkChoicer().JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
@@ -1283,7 +1500,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
// not finish importing) one and that the node is optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
@@ -1305,7 +1522,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
@@ -1328,9 +1545,22 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1389,7 +1619,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
jc := service.ForkChoicer().JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)

// import a block that justifies the second epoch
@@ -1404,14 +1634,14 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
jc = service.ForkChoicer().JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1430,7 +1660,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
// also that the node is optimistic
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
@@ -1452,7 +1682,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
@@ -1476,9 +1706,22 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1559,9 +1802,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
jc := service.ForkChoicer().JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
invalidHeadRoot := service.cfg.ForkChoiceStore.CachedHeadRoot()
invalidHeadRoot := service.ForkChoicer().CachedHeadRoot()

// import block 19 to find out that the whole chain 13--18 was in fact
// invalid
@@ -1582,7 +1825,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
// also that the node is optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, invalidHeadRoot, bytesutil.ToBytes32(headRoot))
@@ -1612,7 +1855,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
@@ -1634,7 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
}
// Head should still be INVALID and the node optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
@@ -1650,7 +1893,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, primitives.Epoch(4), sjc.Epoch)
optimistic, err = service.IsOptimistic(ctx)
@@ -1671,9 +1914,27 @@ func TestNoViableHead_Reboot(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
newfc := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, newfc)
newfc.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(newStateGen),
WithForkChoiceStore(newfc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationService(attSrv),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
@@ -1750,7 +2011,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.onBlock(ctx, wsb, root))
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
jc := service.ForkChoicer().JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

// import block 19 to find out that the whole chain 13--18 was in fact
@@ -1784,7 +2045,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.StartFromSavedState(genesisState))

// Forkchoice has the genesisRoot loaded at startup
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
// Service's store has the finalized state as headRoot
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
@@ -1811,7 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, root, bytesutil.ToBytes32(headRoot))
@@ -1822,8 +2083,18 @@ func TestNoViableHead_Reboot(t *testing.T) {
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fc := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithStateGen(stategen.New(beaconDB, fc)),
|
||||
WithForkChoiceStore(fc),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
@@ -1875,34 +2146,30 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
}
|
||||
|
||||
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
service, tr := minimalTestService(t)
|
||||
service.lateBlockTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
}
|
||||
fc := doublylinkedtree.New()
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
PrepareAllPayloads: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
opts := []Option{
|
||||
WithForkChoiceStore(fc),
|
||||
WithStateGen(stategen.New(beaconDB, fc)),
|
||||
}
|
||||
|
||||
service, tr := minimalTestService(t)
|
||||
service.lateBlockTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.fillMissingBlockPayloadId(ctx), 0)
|
||||
}
|
||||
|
||||
// Helper function to simulate the block being on time or delayed for proposer
|
||||
// boost. It alters the genesisTime tracked by the store.
|
||||
func driftGenesisTime(s *Service, slot, delay int64) {
|
||||
func driftGenesisTime(s *Service, slot int64, delay int64) {
|
||||
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
|
||||
s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
|
||||
}
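
Both driftGenesisTime signatures above are equivalent Go; `slot, delay int64` is just the condensed parameter form. From the arithmetic in the body, a delay of 0 parks the wall clock exactly on the slot boundary, while a positive delay parks it that many seconds before it. A short usage sketch (editor's illustration, not part of the diff):

    // Editor's sketch, not part of the diff.
    driftGenesisTime(service, 5, 0) // clock sits exactly at the start of slot 5
    driftGenesisTime(service, 5, 2) // clock sits 2 seconds before the slot-5 boundary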
@@ -7,6 +7,8 @@ import (
    "time"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/async/event"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/config/features"
@@ -26,7 +28,7 @@ const reorgLateBlockCountAttestations = 2 * time.Second
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
    AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
    AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
}
// AttestationReceiver defines the methods of the chain service for receiving and processing new attestations.
@@ -37,7 +39,7 @@ type AttestationReceiver interface {
}

// AttestationTargetState returns the pre state of the attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
    ss, err := slots.EpochStart(target.Epoch)
    if err != nil {
        return nil, err
@@ -45,9 +47,6 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
    if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
        return nil, err
    }
    // We acquire the lock here instead of in getAttPreState because that function gets called from UpdateHead, which holds a write lock
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.getAttPreState(ctx, target)
}
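
For orientation, a hedged sketch of the call site this method serves (editor's illustration, not part of the diff; `att` stands for any *ethpb.Attestation in scope):

    // Editor's sketch: resolve the state at the attestation's target
    // checkpoint before validating the attestation.
    st, err := s.AttestationTargetState(ctx, att.Data.Target)
    if err != nil {
        return err
    }
    _ = st // used downstream to compute the attesting committee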

@@ -68,13 +67,20 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
}

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
    // Wait for state to be initialized.
    stateChannel := make(chan *feed.Event, 1)
    stateSub := stateFeed.Subscribe(stateChannel)
    go func() {
        _, err := s.clockWaiter.WaitForClock(s.ctx)
        if err != nil {
            log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
        select {
        case <-s.ctx.Done():
            stateSub.Unsubscribe()
            return
        case <-stateChannel:
            stateSub.Unsubscribe()
            break
        }

        if s.genesisTime.IsZero() {
            log.Warn("ProcessAttestations routine waiting for genesis time")
            for s.genesisTime.IsZero() {
@@ -94,15 +100,17 @@ func (s *Service) spawnProcessAttestationsRoutine() {
            case <-s.ctx.Done():
                return
            case <-pat.C():
                s.ForkChoicer().Lock()
                s.UpdateHead(s.ctx, s.CurrentSlot()+1)
                s.ForkChoicer().Unlock()
            case <-st.C():
                s.cfg.ForkChoiceStore.Lock()
                if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
                s.ForkChoicer().Lock()
                if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
                    log.WithError(err).Error("could not process new slot")
                }
                s.cfg.ForkChoiceStore.Unlock()

                s.UpdateHead(s.ctx, s.CurrentSlot())
                s.ForkChoicer().Unlock()
            }
        }
    }()
@@ -111,12 +119,8 @@
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// The caller of this function MUST hold a lock in forkchoice
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
    ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.UpdateHead")
    defer span.End()

    start := time.Now()
    s.cfg.ForkChoiceStore.Lock()
    defer s.cfg.ForkChoiceStore.Unlock()

    // This function is only called at 10 seconds or 0 seconds into the slot
    disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
    if !features.Get().DisableReorgLateBlocks {
@@ -130,24 +134,19 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
    newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
    if err != nil {
        log.WithError(err).Error("Could not compute head from new attestations")
        // Fallback to our current head root in the event of a failure.
        s.headLock.RLock()
        newHeadRoot = s.headRoot()
        s.headLock.RUnlock()
    }
    newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

    changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
    if err != nil {
        log.WithError(err).Error("could not update forkchoice")
    }
    if changed {
        s.headLock.RLock()
    s.headLock.RLock()
    if s.headRoot() != newHeadRoot {
        log.WithFields(logrus.Fields{
            "oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
            "newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
        }).Debug("Head changed due to attestations")
        s.headLock.RUnlock()
    }
    s.headLock.RUnlock()
    if err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot); err != nil {
        log.WithError(err).Error("could not update forkchoice")
    }
}

@@ -7,7 +7,11 @@ import (

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -27,18 +31,22 @@ var (

func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
    helpers.ClearCache()
    service, _ := minimalTestService(t)
    beaconDB := testDB.SetupDB(t)

    service.genesisTime = time.Now()
    chainService := setupBeaconChain(t, beaconDB)
    chainService.genesisTime = time.Now()

    e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
    _, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
    _, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
    require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}

func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
    service, tr := minimalTestService(t)
    ctx := tr.ctx
    ctx := context.Background()
    opts := testServiceOptsWithDB(t)

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    b32 := util.NewBeaconBlock()
    b32.Block.Slot = 32
@@ -61,8 +69,11 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
}

func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
    service, tr := minimalTestService(t)
    ctx := tr.ctx
    ctx := context.Background()

    opts := testServiceOptsWithDB(t)
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    b32 := util.NewBeaconBlock()
    b32.Block.Slot = 32
@@ -85,10 +96,13 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
}

func TestProcessAttestations_Ok(t *testing.T) {
    service, tr := minimalTestService(t)
    hook := logTest.NewGlobal()
    ctx := tr.ctx
    ctx := context.Background()
    opts := testServiceOptsWithDB(t)
    opts = append(opts, WithAttestationPool(attestations.NewPool()))

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
    genesisState, pks := util.DeterministicGenesisState(t, 64)
    require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
@@ -112,9 +126,21 @@ func TestProcessAttestations_Ok(t *testing.T) {
}

func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
    service, tr := minimalTestService(t)
    ctx, fcs := tr.ctx, tr.fcs
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    fcs := doublylinkedtree.New()
    newStateGen := stategen.New(beaconDB, fcs)
    fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(newStateGen),
        WithAttestationPool(attestations.NewPool()),
        WithStateNotifier(&mockBeaconNode{}),
        WithForkChoiceStore(fcs),
    }

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
    genesisState, pks := util.DeterministicGenesisState(t, 64)
    require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -163,9 +189,21 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
}

func TestService_UpdateHead_NoAtts(t *testing.T) {
    service, tr := minimalTestService(t)
    ctx, fcs := tr.ctx, tr.fcs
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    fcs := doublylinkedtree.New()
    newStateGen := stategen.New(beaconDB, fcs)
    fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
    opts := []Option{
        WithDatabase(beaconDB),
        WithAttestationPool(attestations.NewPool()),
        WithStateNotifier(&mockBeaconNode{}),
        WithStateGen(newStateGen),
        WithForkChoiceStore(fcs),
    }

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
    genesisState, pks := util.DeterministicGenesisState(t, 64)
    require.NoError(t, service.saveGenesisData(ctx, genesisState))

@@ -67,12 +67,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
    }

    // Reports on block and fork choice metrics.
    cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
    cp := s.ForkChoicer().FinalizedCheckpoint()
    finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
    reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

    // Log block sync status.
    cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
    cp = s.ForkChoicer().JustifiedCheckpoint()
    justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
    if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
        log.WithError(err).Error("Unable to log block sync status")
@@ -123,7 +123,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
    })

    // Reports on blockCopy and fork choice metrics.
    cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
    cp := s.ForkChoicer().FinalizedCheckpoint()
    finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
    reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}
@@ -131,7 +131,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
    if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
        return err
    }
    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
    finalized := s.ForkChoicer().FinalizedCheckpoint()
    if finalized == nil {
        return errNilFinalizedInStore
    }
@@ -152,8 +152,8 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {

// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
    s.cfg.ForkChoiceStore.Lock()
    defer s.cfg.ForkChoiceStore.Unlock()
    s.ForkChoicer().Lock()
    defer s.ForkChoicer().Unlock()
    s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}

@@ -179,14 +179,10 @@ func (s *Service) prunePostBlockOperationPools(ctx context.Context, blk interfac
        return errors.Wrap(err, "could not process BLSToExecutionChanges")
    }

    // Mark slashings as seen so we don't include same ones in future blocks.
    // Mark attester slashings as seen so we don't include same ones in future blocks.
    for _, as := range blk.Block().Body().AttesterSlashings() {
        s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
    }
    for _, ps := range blk.Block().Body().ProposerSlashings() {
        s.cfg.SlashingPool.MarkIncludedProposerSlashing(ps)
    }

    return nil
}
@@ -211,7 +207,7 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
    currentEpoch := slots.ToEpoch(s.CurrentSlot())
    // Prevent `sinceFinality` from underflowing.
    var sinceFinality primitives.Epoch
    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
    finalized := s.ForkChoicer().FinalizedCheckpoint()
    if finalized == nil {
        return errNilFinalizedInStore
    }

@@ -7,7 +7,12 @@ import (
    "time"

    blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -120,15 +125,22 @@ func TestService_ReceiveBlock(t *testing.T) {
    for _, tt := range tests {
        wg.Add(1)
        t.Run(tt.name, func(t *testing.T) {
            s, tr := minimalTestService(t,
                WithFinalizedStateAtStartUp(genesis),
                WithExitPool(voluntaryexits.NewPool()),
                WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))

            beaconDB := tr.db
            beaconDB := testDB.SetupDB(t)
            genesisBlockRoot := bytesutil.ToBytes32(nil)
            require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))

            fc := doublylinkedtree.New()
            opts := []Option{
                WithDatabase(beaconDB),
                WithForkChoiceStore(fc),
                WithAttestationPool(attestations.NewPool()),
                WithExitPool(voluntaryexits.NewPool()),
                WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
                WithStateGen(stategen.New(beaconDB, fc)),
                WithFinalizedStateAtStartUp(genesis),
            }
            s, err := NewService(ctx, opts...)
            require.NoError(t, err)
            // Initialize it here.
            _ = s.cfg.StateNotifier.StateFeed()
            require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -150,16 +162,25 @@
}

func TestService_ReceiveBlockUpdateHead(t *testing.T) {
    s, tr := minimalTestService(t,
        WithExitPool(voluntaryexits.NewPool()),
        WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
    ctx, beaconDB := tr.ctx, tr.db
    ctx := context.Background()
    genesis, keys := util.DeterministicGenesisState(t, 64)
    b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
    assert.NoError(t, err)
    beaconDB := testDB.SetupDB(t)
    genesisBlockRoot := bytesutil.ToBytes32(nil)
    require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
    fc := doublylinkedtree.New()
    opts := []Option{
        WithDatabase(beaconDB),
        WithForkChoiceStore(fc),
        WithAttestationPool(attestations.NewPool()),
        WithExitPool(voluntaryexits.NewPool()),
        WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
        WithStateGen(stategen.New(beaconDB, fc)),
    }

    s, err := NewService(ctx, opts...)
    require.NoError(t, err)
    // Initialize it here.
    _ = s.cfg.StateNotifier.StateFeed()
    require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -225,8 +246,17 @@ func TestService_ReceiveBlockBatch(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
            err := s.saveGenesisData(ctx, genesis)
            fc := doublylinkedtree.New()
            beaconDB := testDB.SetupDB(t)
            opts := []Option{
                WithDatabase(beaconDB),
                WithForkChoiceStore(fc),
                WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
                WithStateGen(stategen.New(beaconDB, fc)),
            }
            s, err := NewService(ctx, opts...)
            require.NoError(t, err)
            err = s.saveGenesisData(ctx, genesis)
            require.NoError(t, err)
            root, err := tt.args.block.Block.HashTreeRoot()
            require.NoError(t, err)
@@ -246,7 +276,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}

func TestService_HasBlock(t *testing.T) {
    s, _ := minimalTestService(t)
    opts := testServiceOptsWithDB(t)
    opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
    s, err := NewService(context.Background(), opts...)
    require.NoError(t, err)
    r := [32]byte{'a'}
    if s.HasBlock(context.Background(), r) {
        t.Error("Should not have block")
@@ -266,8 +299,10 @@ func TestService_HasBlock(t *testing.T) {
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
    opts := testServiceOptsWithDB(t)
    hook := logTest.NewGlobal()
    s, _ := minimalTestService(t)
    s, err := NewService(context.Background(), opts...)
    require.NoError(t, err)
    st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
    s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

@@ -277,9 +312,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {

func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
    hook := logTest.NewGlobal()

    s, _ := minimalTestService(t)

    opts := testServiceOptsWithDB(t)
    s, err := NewService(context.Background(), opts...)
    require.NoError(t, err)
    st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
    s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
    require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -291,7 +326,9 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
    hook := logTest.NewGlobal()
    s, _ := minimalTestService(t)
    opts := testServiceOptsWithDB(t)
    s, err := NewService(context.Background(), opts...)
    require.NoError(t, err)
    s.genesisTime = time.Now()

    require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -299,8 +336,19 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
}

func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
    service, tr := minimalTestService(t)
    pool := tr.blsPool
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    fc := doublylinkedtree.New()
    pool := blstoexec.NewPool()
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB, fc)),
        WithForkChoiceStore(fc),
        WithStateNotifier(&blockchainTesting.MockStateNotifier{}),
        WithBLSToExecPool(pool),
    }
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    t.Run("pre Capella block", func(t *testing.T) {
        body := &ethpb.BeaconBlockBodyBellatrix{}

@@ -27,7 +27,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/v4/config/features"
@@ -58,8 +57,6 @@ type Service struct {
    initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
    initSyncBlocksLock sync.RWMutex
    wsVerifier *WeakSubjectivityVerifier
    clockSetter startup.ClockSetter
    clockWaiter startup.ClockWaiter
}

// config options for the service.
@@ -86,8 +83,6 @@ type config struct {
    ExecutionEngineCaller execution.EngineCaller
}

var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")

// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
@@ -105,9 +100,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
            return nil, err
        }
    }
    if srv.clockSetter == nil {
        return nil, ErrMissingClockSetter
    }
    var err error
    srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
    if err != nil {
@@ -129,8 +121,8 @@ func (s *Service) Start() {
            log.Fatal(err)
        }
    }
    s.spawnProcessAttestationsRoutine()
    s.spawnLateBlockTasksLoop()
    s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
    s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
}

// Stop the blockchain service's main event loop and associated goroutines.
@@ -244,10 +236,13 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
        return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
    }

    vr := bytesutil.ToBytes32(saved.GenesisValidatorsRoot())
    if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
        return errors.Wrap(err, "failed to initialize blockchain service")
    }
    s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.Initialized,
        Data: &statefeed.InitializedData{
            StartTime: s.genesisTime,
            GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
        },
    })

    return nil
}
@@ -364,10 +359,15 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
    }
    go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)

    vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
    if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
        log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
    }
    // We send out a state initialized event to the rest of the services
    // running in the beacon node.
    s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.Initialized,
        Data: &statefeed.InitializedData{
            StartTime: genesisTime,
            GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
        },
    })
}

// initializes the state and genesis block of the beacon chain to persistent storage

@@ -8,9 +8,13 @@ import (
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/prysmaticlabs/prysm/v4/async/event"
    mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -19,9 +23,7 @@ import (
    mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
    doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/v4/config/features"
@@ -36,8 +38,50 @@ import (
    "github.com/prysmaticlabs/prysm/v4/testing/util"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "google.golang.org/protobuf/proto"
)

type mockBeaconNode struct {
    stateFeed *event.Feed
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
    if mbn.stateFeed == nil {
        mbn.stateFeed = new(event.Feed)
    }
    return mbn.stateFeed
}

type mockBroadcaster struct {
    broadcastCalled bool
}

func (mb *mockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
    //TODO implement me
    panic("implement me")
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
    mb.broadcastCalled = true
    return nil
}

func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
    mb.broadcastCalled = true
    return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
    mb.broadcastCalled = true
    return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
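
The `var _ p2p.Broadcaster = (*mockBroadcaster)(nil)` line above is Go's compile-time interface assertion: assigning a typed nil pointer to a blank variable of the interface type makes the build fail if *mockBroadcaster ever stops implementing p2p.Broadcaster, at zero runtime cost. A self-contained sketch of the idiom (editor's illustration with hypothetical names, not part of the diff):

    // Editor's sketch with hypothetical names, not part of the diff.
    type Greeter interface{ Greet() string }

    type mockGreeter struct{}

    func (mockGreeter) Greet() string { return "hi" }

    // Compilation breaks here if mockGreeter stops satisfying Greeter.
    var _ Greeter = mockGreeter{}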

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
    ctx := context.Background()
    var web3Service *execution.Service
@@ -92,15 +136,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
        WithDepositCache(depositCache),
        WithChainStartFetcher(web3Service),
        WithAttestationPool(attestations.NewPool()),
        WithSlashingPool(slashings.NewPool()),
        WithExitPool(voluntaryexits.NewPool()),
        WithP2PBroadcaster(&mockBroadcaster{}),
        WithStateNotifier(&mockBeaconNode{}),
        WithForkChoiceStore(fc),
        WithAttestationService(attService),
        WithStateGen(stateGen),
        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
        WithClockSynchronizer(startup.NewClockSynchronizer()),
    }

    chainService, err := NewService(ctx, opts...)
@@ -117,14 +158,12 @@ func TestChainStartStop_Initialized(t *testing.T) {

    chainService := setupBeaconChain(t, beaconDB)

    gt := time.Unix(23, 0)
    genesisBlk := util.NewBeaconBlock()
    blkRoot, err := genesisBlk.Block.HashTreeRoot()
    require.NoError(t, err)
    util.SaveBlock(t, ctx, beaconDB, genesisBlk)
    s, err := util.NewBeaconState()
    require.NoError(t, err)
    require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
    require.NoError(t, s.SetSlot(1))
    require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
    require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -154,14 +193,12 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {

    chainService := setupBeaconChain(t, beaconDB)

    gt := time.Unix(23, 0)
    genesisBlk := util.NewBeaconBlock()
    blkRoot, err := genesisBlk.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
    s, err := util.NewBeaconState()
    require.NoError(t, err)
    require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
    require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
    require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -228,14 +265,12 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {

    chainService := setupBeaconChain(t, beaconDB)

    gt := time.Unix(23, 0)
    genesisBlk := util.NewBeaconBlock()
    blkRoot, err := genesisBlk.Block.HashTreeRoot()
    require.NoError(t, err)
    util.SaveBlock(t, ctx, beaconDB, genesisBlk)
    s, err := util.NewBeaconState()
    require.NoError(t, err)
    require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
    require.NoError(t, s.SetSlot(0))
    require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
    require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -256,9 +291,14 @@
}

func TestChainService_InitializeChainInfo(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()

    genesis := util.NewBeaconBlock()
    genesisRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, genesis)

    finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
    headBlock := util.NewBeaconBlock()
@@ -270,18 +310,23 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
    require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
    headRoot, err := headBlock.Block.HashTreeRoot()
    require.NoError(t, err)

    c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
    ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, genesis)
    require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
    require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, headBlock)
    require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
    attSrv, err := attestations.NewService(ctx, &attestations.Config{})
    require.NoError(t, err)
    fc := doublylinkedtree.New()
    stateGen := stategen.New(beaconDB, fc)
    c, err := NewService(ctx,
        WithForkChoiceStore(fc),
        WithDatabase(beaconDB),
        WithStateGen(stateGen),
        WithAttestationService(attSrv),
        WithStateNotifier(&mock.MockStateNotifier{}),
        WithFinalizedStateAtStartUp(headState))
    require.NoError(t, err)
    require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))

    require.NoError(t, c.StartFromSavedState(headState))
    headBlk, err := c.HeadBlock(ctx)
    require.NoError(t, err)
@@ -301,9 +346,14 @@
}

func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()

    genesis := util.NewBeaconBlock()
    genesisRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, genesis)

    finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
    headBlock := util.NewBeaconBlock()
@@ -315,21 +365,27 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
    require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
    headRoot, err := headBlock.Block.HashTreeRoot()
    require.NoError(t, err)

    c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
    ctx, beaconDB := tr.ctx, tr.db

    util.SaveBlock(t, ctx, beaconDB, genesis)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
    require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
    require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, headBlock)
    attSrv, err := attestations.NewService(ctx, &attestations.Config{})
    require.NoError(t, err)
    ss := &ethpb.StateSummary{
        Slot: finalizedSlot,
        Root: headRoot[:],
    }
    require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
    require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
    fc := doublylinkedtree.New()
    stateGen := stategen.New(beaconDB, fc)
    c, err := NewService(ctx,
        WithForkChoiceStore(fc),
        WithDatabase(beaconDB),
        WithStateGen(stateGen),
        WithAttestationService(attSrv),
        WithStateNotifier(&mock.MockStateNotifier{}),
        WithFinalizedStateAtStartUp(headState))
    require.NoError(t, err)

    require.NoError(t, c.StartFromSavedState(headState))
    s, err := c.HeadState(ctx)
@@ -405,21 +461,17 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
}

func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    service := setupBeaconChain(t, beaconDB)
    mgs := &MockClockSetter{}
    service.clockSetter = mgs
    gt := time.Now()
    service.onExecutionChainStart(context.Background(), gt)
    gs, err := beaconDB.GenesisState(ctx)
    require.NoError(t, err)
    require.NotEqual(t, nil, gs)
    require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
    var zero [32]byte
    require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
    require.Equal(t, gt, mgs.G.GenesisTime())
    require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
    stateChannel := make(chan *feed.Event, 1)
    stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
    defer stateSub.Unsubscribe()
    service.onExecutionChainStart(context.Background(), time.Now())

    stateEvent := <-stateChannel
    require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
    _, ok := stateEvent.Data.(*statefeed.InitializedData)
    require.Equal(t, true, ok)
}

func BenchmarkHasBlockDB(b *testing.B) {
@@ -468,10 +520,15 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
        EnableStartOptimistic: true,
    })
    defer resetFn()
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()

    genesis := util.NewBeaconBlock()
    genesisRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, genesis)

    finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
    headBlock := util.NewBeaconBlock()
    headBlock.Block.Slot = finalizedSlot
@@ -482,17 +539,21 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
    require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
    headRoot, err := headBlock.Block.HashTreeRoot()
    require.NoError(t, err)

    c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
    ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, genesis)
    require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
    require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
    util.SaveBlock(t, ctx, beaconDB, headBlock)
    require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))

    attSrv, err := attestations.NewService(ctx, &attestations.Config{})
    require.NoError(t, err)
    fc := doublylinkedtree.New()
    stateGen := stategen.New(beaconDB, fc)
    c, err := NewService(ctx,
        WithForkChoiceStore(fc),
        WithDatabase(beaconDB),
        WithStateGen(stateGen),
        WithAttestationService(attSrv),
        WithStateNotifier(&mock.MockStateNotifier{}),
        WithFinalizedStateAtStartUp(headState))
    require.NoError(t, err)
    require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
@@ -502,19 +563,3 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, true, op)
}

// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
// call SetGenesis.
type MockClockSetter struct {
    G *startup.Clock
    Err error
}

var _ startup.ClockSetter = &MockClockSetter{}

// SetClock satisfies the ClockSetter interface.
// The value is written to an exported field 'G' so that it can be accessed in tests.
func (s *MockClockSetter) SetClock(g *startup.Clock) error {
    s.G = g
    return s.Err
}
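
The exported `G` field is what lets tests assert on the clock the service set; TestProcessChainStartTime_ReceivedFeed above wires it in exactly this way. A condensed sketch (editor's illustration, not part of the diff):

    // Editor's sketch: inject the mock, trigger startup, then assert
    // SetClock was called with the expected genesis time.
    mgs := &MockClockSetter{}
    service.clockSetter = mgs
    gt := time.Now()
    service.onExecutionChainStart(context.Background(), gt)
    require.Equal(t, gt, mgs.G.GenesisTime())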

@@ -1,115 +0,0 @@
package blockchain

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v4/async/event"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
    doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "google.golang.org/protobuf/proto"
)

type mockBeaconNode struct {
    stateFeed *event.Feed
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
    if mbn.stateFeed == nil {
        mbn.stateFeed = new(event.Feed)
    }
    return mbn.stateFeed
}

type mockBroadcaster struct {
    broadcastCalled bool
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
    mb.broadcastCalled = true
    return nil
}

func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
    mb.broadcastCalled = true
    return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
    mb.broadcastCalled = true
    return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

type testServiceRequirements struct {
    ctx context.Context
    db db.Database
    fcs forkchoice.ForkChoicer
    sg *stategen.State
    notif statefeed.Notifier
    cs *startup.ClockSynchronizer
    attPool attestations.Pool
    attSrv *attestations.Service
    blsPool *blstoexec.Pool
    dc *depositcache.DepositCache
}

func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    fcs := doublylinkedtree.New()
    sg := stategen.New(beaconDB, fcs)
    notif := &mockBeaconNode{}
    fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
    cs := startup.NewClockSynchronizer()
    attPool := attestations.NewPool()
    attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
    require.NoError(t, err)
    blsPool := blstoexec.NewPool()
    dc, err := depositcache.New()
    require.NoError(t, err)
    req := &testServiceRequirements{
        ctx: ctx,
        db: beaconDB,
        fcs: fcs,
        sg: sg,
        notif: notif,
        cs: cs,
        attPool: attPool,
        attSrv: attSrv,
        blsPool: blsPool,
        dc: dc,
    }
    defOpts := []Option{WithDatabase(req.db),
        WithStateNotifier(req.notif),
        WithStateGen(req.sg),
        WithForkChoiceStore(req.fcs),
        WithClockSynchronizer(req.cs),
        WithAttestationPool(req.attPool),
        WithAttestationService(req.attSrv),
        WithBLSToExecPool(req.blsPool),
        WithDepositCache(dc),
    }
    // append the variadic opts so they override the defaults by being processed afterwards
    opts = append(defOpts, opts...)
    s, err := NewService(req.ctx, opts...)

    require.NoError(t, err)
    return s, req
}
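
minimalTestService is the consolidation most of the rewritten tests in this compare lean on: it assembles a Service from sane defaults and returns the shared dependencies, so a test overrides only what it needs. A usage sketch (editor's illustration; every option shown appears elsewhere in this diff):

    // Editor's sketch, not part of the diff.
    service, tr := minimalTestService(t) // defaults only
    ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
    _, _, _, _ = service, ctx, beaconDB, fcs

    // Later options win because they are appended after defOpts above.
    s2, _ := minimalTestService(t, WithExitPool(voluntaryexits.NewPool()))
    _ = s2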

@@ -27,7 +27,6 @@ go_library(
    "//consensus-types/primitives:go_default_library",
    "//encoding/bytesutil:go_default_library",
    "//proto/engine/v1:go_default_library",
    "//proto/eth/v1:go_default_library",
    "//proto/prysm/v1alpha1:go_default_library",
    "@com_github_pkg_errors//:go_default_library",
    "@com_github_sirupsen_logrus//:go_default_library",

@@ -26,7 +26,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
)
@@ -71,8 +70,13 @@ type ChainService struct {
    OptimisticRoots map[[32]byte]bool
}

// ForkChoicer mocks the same method in the chain service
func (s *ChainService) ForkChoicer() forkchoice.ForkChoicer {
    return s.ForkChoiceStore
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
    r, err := s.ForkChoiceStore.AncestorRoot(ctx, bytesutil.ToBytes32(root), slot)
    r, err := s.ForkChoicer().AncestorRoot(ctx, bytesutil.ToBytes32(root), slot)
    return r[:], err
}

@@ -320,7 +324,7 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
}

// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
    return s.State, nil
}

@@ -469,7 +473,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
    if err != nil {
        logrus.WithError(err).Error("could not update head")
    }
    err = s.ForkChoiceStore.InsertNode(ctx, st, root)
    err = s.ForkChoicer().InsertNode(ctx, st, root)
    if err != nil {
        logrus.WithError(err).Error("could not insert node to forkchoice")
    }
@@ -516,84 +520,3 @@ func prepareForkchoiceState(
    st, err := state_native.InitializeFromProtoBellatrix(base)
    return st, blockRoot, err
}

// CachedHeadRoot mocks the same method in the chain service
func (s *ChainService) CachedHeadRoot() [32]byte {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.CachedHeadRoot()
    }
    return [32]byte{}
}

// GetProposerHead mocks the same method in the chain service
func (s *ChainService) GetProposerHead() [32]byte {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.GetProposerHead()
    }
    return [32]byte{}
}
// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
    if s.ForkChoiceStore != nil {
        s.ForkChoiceStore.SetGenesisTime(timestamp)
    }
}

// ReceivedBlocksLastEpoch mocks the same method in the chain service
func (s *ChainService) ReceivedBlocksLastEpoch() (uint64, error) {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.ReceivedBlocksLastEpoch()
    }
    return 0, nil
}

// HighestReceivedBlockSlot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.HighestReceivedBlockSlot()
    }
    return 0
}

// InsertNode mocks the same method in the chain service
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.InsertNode(ctx, st, root)
    }
    return nil
}

// ForkChoiceDump mocks the same method in the chain service
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.ForkChoiceDump(ctx)
    }
    return nil, nil
}

// NewSlot mocks the same method in the chain service
func (s *ChainService) NewSlot(ctx context.Context, slot primitives.Slot) error {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.NewSlot(ctx, slot)
    }
    return nil
}

// ProposerBoost mocks the same method in the chain service
func (s *ChainService) ProposerBoost() [32]byte {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.ProposerBoost()
    }
    return [32]byte{}
}

// FinalizedBlockHash mocks the same method in the chain service
func (s *ChainService) FinalizedBlockHash() [32]byte {
    return [32]byte{}
}

// UnrealizedJustifiedPayloadBlockHash mocks the same method in the chain service
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
    return [32]byte{}
}

@@ -78,7 +78,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
        wsVerifier: wv,
    }
    require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
    cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
    cp := s.ForkChoicer().FinalizedCheckpoint()
    err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
    if tt.wantErr == nil {
        require.NoError(t, err)

@@ -12,13 +12,11 @@ go_library(
    deps = [
        "//api/client/builder:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",

@@ -3,7 +3,6 @@ package builder
import (
    "github.com/prysmaticlabs/prysm/v4/api/client/builder"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
    "github.com/urfave/cli/v2"
@@ -51,11 +50,3 @@ func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
        return nil
    }
}

// WithRegistrationCache uses a cache for the validator registrations instead of a persistent db.
func WithRegistrationCache() Option {
    return func(s *Service) error {
        s.registrationCache = cache.NewRegistrationCache()
        return nil
    }
}
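
One side of this compare carries the registration cache; a sketch of how that path is selected and exercised (editor's illustration, not part of the diff; `regs` stands for any []*ethpb.SignedValidatorRegistrationV1):

    // Editor's sketch: with the option applied, registrations live in
    // memory instead of the beacon db.
    s, err := NewService(ctx, WithRegistrationCache())
    if err != nil {
        return err
    }
    if err := s.RegisterValidator(ctx, regs); err != nil {
        return err
    }
    reg, err := s.RegistrationByValidatorID(ctx, 0) // served from the cache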
|
||||
|
||||
@@ -8,12 +8,10 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -27,7 +25,6 @@ type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
Configured() bool
|
||||
}
|
||||
|
||||
@@ -40,11 +37,10 @@ type config struct {
|
||||
|
||||
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
|
||||
type Service struct {
|
||||
cfg *config
|
||||
c builder.BuilderClient
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
registrationCache *cache.RegistrationCache
|
||||
cfg *config
|
||||
c builder.BuilderClient
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// NewService instantiates a new service.
|
||||
@@ -81,8 +77,7 @@ func (s *Service) Start() {
|
||||
}
|
||||
|
||||
// Stop halts the service.
|
||||
func (s *Service) Stop() error {
|
||||
s.cancel()
|
||||
func (*Service) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -94,9 +89,6 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
    defer func() {
        submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
    }()
    if s.c == nil {
        return nil, ErrNoBuilder
    }

    return s.c.SubmitBlindedBlock(ctx, b)
}

@@ -109,14 +101,8 @@ func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHas
    defer func() {
        getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
    }()
    if s.c == nil {
        tracing.AnnotateError(span, ErrNoBuilder)
        return nil, ErrNoBuilder
    }

    h, err := s.c.GetHeader(ctx, slot, parentHash, pubKey)
    tracing.AnnotateError(span, err)
    return h, err
    return s.c.GetHeader(ctx, slot, parentHash, pubKey)
}
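Both SubmitBlindedBlock and GetHeader keep the same timing idiom through the change: capture a start time, then observe the elapsed milliseconds in a defer so that every return path is measured. A self-contained sketch of that idiom with an illustrative metric (the real histogram names and buckets live elsewhere in the package):

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // Illustrative metric; not the repository's actual definition.
    var getHeaderLatency = promauto.NewHistogram(prometheus.HistogramOpts{
        Name: "example_get_header_latency_milliseconds",
        Help: "Time taken for a GetHeader round trip.",
    })

    func timedGetHeader() {
        start := time.Now()
        defer func() {
            // Runs on every return path, including early error returns.
            getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
        }()
        // ... actual relay call ...
    }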
// Status retrieves the status of the builder relay network.

@@ -138,16 +124,9 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
    defer func() {
        registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
    }()
    if s.c == nil {
        return ErrNoBuilder
    }

    // should be removed if db is removed
    idxs := make([]primitives.ValidatorIndex, 0)
    msgs := make([]*ethpb.ValidatorRegistrationV1, 0)

    indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)

    valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
    for i := 0; i < len(reg); i++ {
        r := reg[i]

@@ -161,33 +140,12 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
        idxs = append(idxs, nx)
        msgs = append(msgs, r.Message)
        valid = append(valid, r)
        indexToRegistration[nx] = r.Message
    }
    if err := s.c.RegisterValidator(ctx, valid); err != nil {
        return errors.Wrap(err, "could not register validator(s)")
    }

    if len(indexToRegistration) != len(msgs) {
        return errors.New("ids and registrations must be the same length")
    }
    if s.registrationCache != nil {
        s.registrationCache.UpdateIndexToRegisteredMap(ctx, indexToRegistration)
        return nil
    } else {
        return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
    }
}

// RegistrationByValidatorID returns either the values from the cache or db.
func (s *Service) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
    if s.registrationCache != nil {
        return s.registrationCache.RegistrationByIndex(id)
    } else {
        if s.cfg == nil || s.cfg.beaconDB == nil {
            return nil, errors.New("nil beacon db")
        }
        return s.cfg.beaconDB.RegistrationByValidatorID(ctx, id)
    }
    return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
}
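Both RegisterValidator and RegistrationByValidatorID follow the same shape on the cache-enabled side of the compare: write to or read from the registration cache when it is non-nil, otherwise fall back to the beacon DB. Stripped to its skeleton, with hypothetical interface names standing in for the real types:

    // store is a hypothetical abstraction over the two backends.
    type store interface {
        Get(id uint64) ([]byte, error)
    }

    type svc struct {
        cache store // non-nil only when WithRegistrationCache was applied
        db    store
    }

    func (s *svc) registration(id uint64) ([]byte, error) {
        if s.cache != nil {
            return s.cache.Get(id) // in-memory path; entries expire internally
        }
        return s.db.Get(id) // durable path
    }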
// Configured returns true if the user has configured a builder client.

@@ -37,18 +37,3 @@ func Test_RegisterValidator(t *testing.T) {
    require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], FeeRecipient: feeRecipient[:]}}}))
    assert.Equal(t, true, builder.RegisteredVals[pubkey])
}

func Test_BuilderMethodsWithouClient(t *testing.T) {
    s, err := NewService(context.Background())
    require.NoError(t, err)
    assert.Equal(t, false, s.Configured())

    _, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
    assert.ErrorContains(t, ErrNoBuilder.Error(), err)

    _, err = s.SubmitBlindedBlock(context.Background(), nil)
    assert.ErrorContains(t, ErrNoBuilder.Error(), err)

    err = s.RegisterValidator(context.Background(), nil)
    assert.ErrorContains(t, ErrNoBuilder.Error(), err)
}
@@ -8,8 +8,6 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//api/client/builder:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
@@ -2,11 +2,10 @@ package testing

import (
    "context"
    "math/big"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/api/client/builder"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"

@@ -16,11 +15,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/time/slots"
)

// Config defines a config struct for dependencies into the service.
type Config struct {
    BeaconDB db.HeadAccessDatabase
}

// MockBuilderService to mock builder.
type MockBuilderService struct {
    HasConfigured bool

@@ -29,10 +23,8 @@ type MockBuilderService struct {
    ErrSubmitBlindedBlock error
    Bid                   *ethpb.SignedBuilderBid
    BidCapella            *ethpb.SignedBuilderBidCapella
    RegistrationCache     *cache.RegistrationCache
    ErrGetHeader          error
    ErrRegisterValidator  error
    Cfg                   *Config
}

// Configured for mocking.

@@ -49,7 +41,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
    }
    return w, s.ErrSubmitBlindedBlock
}
    w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
    w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
    if err != nil {
        return nil, errors.Wrap(err, "could not wrap capella payload")
    }

@@ -57,8 +49,8 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}

// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
    if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
func (s *MockBuilderService) GetHeader(ctx context.Context, slot primitives.Slot, hr [32]byte, pb [48]byte) (builder.SignedBid, error) {
    if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
        return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
    }
    w, err := builder.WrappedSignedBuilderBid(s.Bid)

@@ -68,17 +60,6 @@ func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot,
    return w, s.ErrGetHeader
}

// RegistrationByValidatorID returns either the values from the cache or db.
func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
    if s.RegistrationCache != nil {
        return s.RegistrationCache.RegistrationByIndex(id)
    }
    if s.Cfg.BeaconDB != nil {
        return s.Cfg.BeaconDB.RegistrationByValidatorID(ctx, id)
    }
    return nil, cache.ErrNotFoundRegistration
}

// RegisterValidator for mocking.
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
    return s.ErrRegisterValidator
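The mock's GetHeader chooses between a Capella bid and a legacy bid purely from the requested slot's epoch, so a test selects the code path by picking the slot. A hedged sketch of how a caller might exercise the post-fork path (bid values are placeholders, and the slots.EpochStart helper signature is assumed from context):

    mock := &MockBuilderService{
        HasConfigured: true,
        Bid:           &ethpb.SignedBuilderBid{},        // pre-Capella path
        BidCapella:    &ethpb.SignedBuilderBidCapella{}, // post-fork path
    }
    capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
    require.NoError(t, err)
    bid, err := mock.GetHeader(context.Background(), capellaSlot, [32]byte{}, [48]byte{})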
5  beacon-chain/cache/BUILD.bazel  vendored
@@ -17,7 +17,6 @@ go_library(
        "proposer_indices.go",
        "proposer_indices_disabled.go",  # keep
        "proposer_indices_type.go",
        "registration.go",
        "skip_slot_cache.go",
        "subnet_ids.go",
        "sync_committee.go",

@@ -48,7 +47,6 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],

@@ -66,7 +64,6 @@ go_test(
        "committee_test.go",
        "payload_id_test.go",
        "proposer_indices_test.go",
        "registration_test.go",
        "skip_slot_cache_test.go",
        "subnet_ids_test.go",
        "sync_committee_head_state_test.go",

@@ -85,9 +82,7 @@ go_test(
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
13  beacon-chain/cache/active_balance.go  vendored
@@ -42,16 +42,9 @@ type BalanceCache struct {

// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
    c := &BalanceCache{}
    c.Clear()
    return c
}

// Clear resets the SyncCommitteeCache to its initial state
func (c *BalanceCache) Clear() {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.cache = lruwrpr.New(maxBalanceCacheSize)
    return &BalanceCache{
        cache: lruwrpr.New(maxBalanceCacheSize),
    }
}

// AddTotalEffectiveBalance adds a new total effective balance entry for current balance for state `st` into the cache.
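One side of this compare gives each cache a Clear method that resets its contents in place under the lock; the other removes Clear and has callers construct a brand-new cache value instead. Both patterns in miniature, assuming hashicorp's golang-lru as the backing store:

    import (
        "sync"

        lru "github.com/hashicorp/golang-lru"
    )

    type balCache struct {
        lock  sync.Mutex
        cache *lru.Cache
    }

    // In-place reset: anyone already holding a *balCache keeps a valid pointer.
    func (c *balCache) Clear() {
        c.lock.Lock()
        defer c.lock.Unlock()
        c.cache, _ = lru.New(128) // size is illustrative
    }

    // Reconstruct: callers must swap their own reference to the new value.
    func newBalCache() *balCache {
        l, _ := lru.New(128)
        return &balCache{cache: l}
    }

The in-place variant matters when the cache is shared and updated asynchronously, which is exactly what the sync committee cache's cleared counter, later in this compare, guards against.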
10  beacon-chain/cache/active_balance_disabled.go  vendored
@@ -3,11 +3,16 @@
package cache

import (
    "sync"

    lru "github.com/hashicorp/golang-lru"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
)

// FakeBalanceCache is a fake struct with 1 LRU cache for looking up balance by epoch.
type FakeBalanceCache struct {
    cache *lru.Cache
    lock  sync.RWMutex
}

// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.

@@ -24,8 +29,3 @@ func (c *FakeBalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState
func (c *FakeBalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
    return 0, nil
}

// Clear is a stub.
func (c *FakeBalanceCache) Clear() {
    return
}
15  beacon-chain/cache/committee.go  vendored
@@ -56,17 +56,10 @@ func committeeKeyFn(obj interface{}) (string, error) {

// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
    cc := &CommitteeCache{}
    cc.Clear()
    return cc
}

// Clear resets the CommitteeCache to its initial state
func (c *CommitteeCache) Clear() {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
    c.inProgress = make(map[string]bool)
    return &CommitteeCache{
        CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
        inProgress:     make(map[string]bool),
    }
}

// Committee fetches the shuffled indices by slot and committee index. Every list of indices
5  beacon-chain/cache/committee_disabled.go  vendored
@@ -69,8 +69,3 @@ func (c *FakeCommitteeCache) MarkInProgress(seed [32]byte) error {
func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
    return nil
}

// Clear is a stub.
func (c *FakeCommitteeCache) Clear() {
    return
}
@@ -129,7 +129,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
}

// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) error {
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
    ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
    defer span.End()
    dc.depositsLock.Lock()

@@ -141,7 +141,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
    // Don't insert into finalized trie if there is no deposit to
    // insert.
    if len(dc.deposits) == 0 {
        return nil
        return
    }
    // In the event we have less deposits than we need to
    // finalize we finalize till the index on which we do have it.

@@ -151,7 +151,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
    // If we finalize to some lower deposit index, we
    // ignore it.
    if int(eth1DepositIndex) < insertIndex {
        return nil
        return
    }
    for _, d := range dc.deposits {
        if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {

@@ -162,10 +162,12 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
        }
        depHash, err := d.Deposit.Data.HashTreeRoot()
        if err != nil {
            return errors.Wrap(err, "could not hash deposit data")
            log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
            return
        }
        if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
            return errors.Wrap(err, "could not insert deposit hash")
            log.WithError(err).Error("Could not insert deposit hash")
            return
        }
        insertIndex++
    }

@@ -174,7 +176,6 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
        Deposits:        depositTrie,
        MerkleTrieIndex: eth1DepositIndex,
    }
    return nil
}

// AllDepositContainers returns all historical deposit containers.
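Dropping the error return changes the contract from "caller handles failures" to "failures are logged and the finalized trie is simply left un-advanced," which is why every call site in the tests below loses its require.NoError wrapper. The two call shapes side by side, as a sketch:

    // Error-returning contract: the caller decides what a failure means.
    if err := dc.InsertFinalizedDeposits(ctx, eth1DepositIndex); err != nil {
        return errors.Wrap(err, "could not finalize deposits")
    }

    // Fire-and-forget contract: errors are logged inside and the cache
    // state is left unchanged; the caller just invokes it.
    dc.InsertFinalizedDeposits(ctx, eth1DepositIndex)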
@@ -416,7 +416,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
        Index: 3,
    })

    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
    dc.InsertFinalizedDeposits(context.Background(), 2)

    cachedDeposits := dc.FinalizedDeposits(context.Background())
    require.NotNil(t, cachedDeposits, "Deposits not cached")

@@ -474,9 +474,9 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
        Index: 2,
    }
    dc.deposits = oldFinalizedDeposits
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
    dc.InsertFinalizedDeposits(context.Background(), 1)

    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
    dc.InsertFinalizedDeposits(context.Background(), 2)

    dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)

@@ -503,7 +503,7 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
    dc.InsertFinalizedDeposits(context.Background(), 2)

    cachedDeposits := dc.FinalizedDeposits(context.Background())
    require.NotNil(t, cachedDeposits, "Deposits not cached")

@@ -548,7 +548,7 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
    }
    dc.deposits = finalizedDeposits

    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
    dc.InsertFinalizedDeposits(context.Background(), 5)

    cachedDeposits := dc.FinalizedDeposits(context.Background())
    require.NotNil(t, cachedDeposits, "Deposits not cached")

@@ -623,10 +623,10 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
    }
    dc.deposits = finalizedDeposits

    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
    dc.InsertFinalizedDeposits(context.Background(), 5)

    // Reinsert finalized deposits with a lower index.
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
    dc.InsertFinalizedDeposits(context.Background(), 2)

    cachedDeposits := dc.FinalizedDeposits(context.Background())
    require.NotNil(t, cachedDeposits, "Deposits not cached")

@@ -694,7 +694,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
        },
        Index: 3,
    })
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
    dc.InsertFinalizedDeposits(context.Background(), 1)

    deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
    assert.Equal(t, 2, len(deps))

@@ -751,7 +751,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
        },
        Index: 3,
    })
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
    dc.InsertFinalizedDeposits(context.Background(), 1)

    deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
    assert.Equal(t, 1, len(deps))

@@ -799,12 +799,12 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
    assert.NoError(t, err)

    // Perform this in a non-sensical ordering
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10))
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))
    dc.InsertFinalizedDeposits(context.Background(), 10)
    dc.InsertFinalizedDeposits(context.Background(), 2)
    dc.InsertFinalizedDeposits(context.Background(), 3)
    dc.InsertFinalizedDeposits(context.Background(), 4)

    // Mimic finalized deposit trie fetch.
    // Mimick finalized deposit trie fetch.
    fd := dc.FinalizedDeposits(context.Background())
    deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
    insertIndex := fd.MerkleTrieIndex + 1

@@ -817,9 +817,9 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
        }
        insertIndex++
    }
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
    require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 14))
    dc.InsertFinalizedDeposits(context.Background(), 15)
    dc.InsertFinalizedDeposits(context.Background(), 15)
    dc.InsertFinalizedDeposits(context.Background(), 14)

    fd = dc.FinalizedDeposits(context.Background())
    deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))
4  beacon-chain/cache/error.go  vendored
@@ -1,6 +1,6 @@
package cache

import "github.com/pkg/errors"
import "errors"

var (
    // ErrNilValueProvided for when we try to put a nil value in a cache.

@@ -12,6 +12,4 @@ var (
    // ErrNonExistingSyncCommitteeKey when sync committee key (root) does not exist in cache.
    ErrNonExistingSyncCommitteeKey = errors.New("does not exist sync committee key")
    errNotSyncCommitteeIndexPosition = errors.New("not syncCommitteeIndexPosition struct")
    // ErrNotFoundRegistration when validator registration does not exist in cache.
    ErrNotFoundRegistration = errors.Wrap(ErrNotFound, "no validator registered")
)
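The import flip between github.com/pkg/errors and the standard library tracks the removal of ErrNotFoundRegistration, the only variable here that needed errors.Wrap. In modern Go the same errors.Is-compatible chain can be built with fmt.Errorf and the %w verb; a sketch:

    import (
        "errors"
        "fmt"
    )

    var ErrNotFound = errors.New("not found in cache")

    // %w wraps the sentinel, so errors.Is(err, ErrNotFound) still matches.
    var ErrNotFoundRegistration = fmt.Errorf("no validator registered: %w", ErrNotFound)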
13  beacon-chain/cache/proposer_indices.go  vendored
@@ -46,16 +46,9 @@ func proposerIndicesKeyFn(obj interface{}) (string, error) {

// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
func NewProposerIndicesCache() *ProposerIndicesCache {
    c := &ProposerIndicesCache{}
    c.Clear()
    return c
}

// Clear resets the ProposerIndicesCache to its initial state
func (c *ProposerIndicesCache) Clear() {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.proposerIndicesCache = cache.NewFIFO(proposerIndicesKeyFn)
    return &ProposerIndicesCache{
        proposerIndicesCache: cache.NewFIFO(proposerIndicesKeyFn),
    }
}

// AddProposerIndices adds ProposerIndices object to the cache.

@@ -33,7 +33,3 @@ func (c *FakeProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error)
func (c *FakeProposerIndicesCache) Len() int {
    return 0
}

// Clear is a stub.
func (c *FakeProposerIndicesCache) Clear() {
}
82  beacon-chain/cache/registration.go  vendored
@@ -1,82 +0,0 @@
package cache

import (
    "context"
    "sync"
    "time"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v4/math"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    log "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

// RegistrationCache is used to store the cached results of an Validator Registration request.
// beacon api /eth/v1/validator/register_validator
type RegistrationCache struct {
    indexToRegistration map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1
    lock                sync.RWMutex
}

// NewRegistrationCache initializes the map and underlying cache.
func NewRegistrationCache() *RegistrationCache {
    return &RegistrationCache{
        indexToRegistration: make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1),
        lock:                sync.RWMutex{},
    }
}

// RegistrationByIndex returns the registration by index in the cache and also removes items in the cache if expired.
func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
    regCache.lock.RLock()
    v, ok := regCache.indexToRegistration[id]
    if !ok {
        regCache.lock.RUnlock()
        return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
    }
    isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to check registration expiration")
    }
    if isExpired {
        regCache.lock.RUnlock()
        regCache.lock.Lock()
        defer regCache.lock.Unlock()
        delete(regCache.indexToRegistration, id)
        log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
        return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
    }
    regCache.lock.RUnlock()
    return v, nil
}

func RegistrationTimeStampExpired(ts uint64) (bool, error) {
    // safely convert unint64 to int64
    i, err := math.Int(ts)
    if err != nil {
        return false, err
    }
    expiryDuration := params.BeaconConfig().RegistrationDuration
    // registered time + expiration duration < current time = expired
    return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}

// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
    _, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")
    defer span.End()
    regCache.lock.Lock()
    defer regCache.lock.Unlock()
    for key, value := range m {
        regCache.indexToRegistration[key] = &ethpb.ValidatorRegistrationV1{
            Pubkey:       bytesutil.SafeCopyBytes(value.Pubkey),
            FeeRecipient: bytesutil.SafeCopyBytes(value.FeeRecipient),
            GasLimit:     value.GasLimit,
            Timestamp:    value.Timestamp,
        }
    }
}
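The removed file above is the whole cache: a mutex-guarded map from validator index to registration, with lazy eviction on read. Its intended lifecycle, as the tests that follow exercise it (values are placeholders):

    c := NewRegistrationCache()
    c.UpdateIndexToRegisteredMap(ctx, map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1{
        1: {Pubkey: pubkey, FeeRecipient: feeRecipient, GasLimit: 100, Timestamp: uint64(time.Now().Unix())},
    })
    reg, err := c.RegistrationByIndex(1) // expired entries are deleted here

One subtlety worth flagging when reviewing the hunk: on the "failed to check registration expiration" path, RegistrationByIndex returns while the read lock is still held, so that error path would leave the cache read-locked.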
82  beacon-chain/cache/registration_test.go  vendored
@@ -1,82 +0,0 @@
package cache

import (
    "context"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestRegistrationCache(t *testing.T) {
    hook := logTest.NewGlobal()
    pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
    require.NoError(t, err)
    validatorIndex := primitives.ValidatorIndex(1)
    cache := NewRegistrationCache()
    m := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)

    m[validatorIndex] = &ethpb.ValidatorRegistrationV1{
        FeeRecipient: []byte{},
        GasLimit:     100,
        Timestamp:    uint64(time.Now().Unix()),
        Pubkey:       pubkey,
    }
    cache.UpdateIndexToRegisteredMap(context.Background(), m)
    reg, err := cache.RegistrationByIndex(validatorIndex)
    require.NoError(t, err)
    require.Equal(t, string(reg.Pubkey), string(pubkey))
    t.Run("Registration expired", func(t *testing.T) {
        validatorIndex2 := primitives.ValidatorIndex(2)
        overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
        m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
            FeeRecipient: []byte{},
            GasLimit:     100,
            Timestamp:    uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
            Pubkey:       pubkey,
        }
        cache.UpdateIndexToRegisteredMap(context.Background(), m)
        _, err := cache.RegistrationByIndex(validatorIndex2)
        require.ErrorContains(t, "no validator registered", err)
        require.LogsContain(t, hook, "expired")
    })
    t.Run("Registration close to expiration still passes", func(t *testing.T) {
        pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
        require.NoError(t, err)
        validatorIndex2 := primitives.ValidatorIndex(2)
        overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
        m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
            FeeRecipient: []byte{},
            GasLimit:     100,
            Timestamp:    uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
            Pubkey:       pubkey,
        }
        cache.UpdateIndexToRegisteredMap(context.Background(), m)
        reg, err := cache.RegistrationByIndex(validatorIndex2)
        require.NoError(t, err)
        require.Equal(t, string(reg.Pubkey), string(pubkey))
    })
}

func Test_RegistrationTimeStampExpired(t *testing.T) {
    // expiration set at 3 epochs
    t.Run("expired registration", func(t *testing.T) {
        overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
        ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
        isExpired, err := RegistrationTimeStampExpired(ts)
        require.NoError(t, err)
        require.Equal(t, true, isExpired)
    })
    t.Run("is not expired registration", func(t *testing.T) {
        overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs -5 seconds
        ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
        isExpired, err := RegistrationTimeStampExpired(ts)
        require.NoError(t, err)
        require.Equal(t, false, isExpired)
    })
}
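The padded timestamps in these tests fall out of the expiry-window arithmetic. Assuming mainnet parameters (12-second slots, 32 slots per epoch) and the three-epoch RegistrationDuration the test comments reference, the window is 12 * 32 * 3 = 1152 seconds; a registration stamped 4 epochs ago (12 * 32 * 4 = 1536 s) is past the window and gets evicted, while one stamped 5 seconds short of the window (1152 - 5 = 1147 s ago) still passes.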
29  beacon-chain/cache/sync_committee.go  vendored
@@ -4,14 +4,12 @@ package cache

import (
    "sync"
    "sync/atomic"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    log "github.com/sirupsen/logrus"
    "k8s.io/client-go/tools/cache"
)

@@ -33,9 +31,8 @@ var (
// SyncCommitteeCache utilizes a FIFO cache to sufficiently cache validator position within sync committee.
// It is thread safe with concurrent read write.
type SyncCommitteeCache struct {
    cache   *cache.FIFO
    lock    sync.RWMutex
    cleared *atomic.Uint64
    cache *cache.FIFO
    lock  sync.RWMutex
}

// Index position of all validators in sync committee where `currentSyncCommitteeRoot` is the

@@ -54,17 +51,9 @@ type positionInCommittee struct {

// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *SyncCommitteeCache {
    c := &SyncCommitteeCache{cleared: &atomic.Uint64{}}
    c.Clear()
    return c
}

// Clear resets the SyncCommitteeCache to its initial state
func (s *SyncCommitteeCache) Clear() {
    s.lock.Lock()
    defer s.lock.Unlock()
    s.cleared.Add(1)
    s.cache = cache.NewFIFO(keyFn)
    return &SyncCommitteeCache{
        cache: cache.NewFIFO(keyFn),
    }
}

// CurrentPeriodIndexPosition returns current period index position of a validator index with respect with

@@ -134,10 +123,6 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
    // since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
    // seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
    // cycled (new address), don't update it.
    clearCount := s.cleared.Load()
    csc, err := st.CurrentSyncCommittee()
    if err != nil {
        return err

@@ -177,10 +162,6 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo

    s.lock.Lock()
    defer s.lock.Unlock()
    if clearCount != s.cleared.Load() {
        log.Warn("cache rotated during async committee update operation - abandoning cache update")
        return nil
    }

    if err := s.cache.Add(&syncCommitteeIndexPosition{
        currentSyncCommitteeRoot: syncCommitteeBoundaryRoot,

@@ -30,8 +30,3 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
    return nil
}

// Clear -- fake.
func (s *FakeSyncCommitteeCache) Clear() {
    return
}
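The deleted cleared counter is the heart of this hunk. UpdatePositionsInCommittee runs asynchronously, so the base branch snapshots a generation number before the slow committee computation and re-checks it under the write lock; if Clear ran in between, the stale result is discarded rather than written into a freshly rotated cache. The pattern in isolation, as a minimal sketch:

    import (
        "sync"
        "sync/atomic"
    )

    type genCache struct {
        mu      sync.Mutex
        gen     atomic.Uint64 // bumped by every Clear
        entries map[string]int
    }

    func (c *genCache) Clear() {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.gen.Add(1)
        c.entries = make(map[string]int)
    }

    func (c *genCache) asyncUpdate(key string, compute func() int) {
        gen := c.gen.Load() // snapshot before the expensive work
        v := compute()      // done outside the lock
        c.mu.Lock()
        defer c.mu.Unlock()
        if gen != c.gen.Load() {
            return // cache rotated mid-flight; drop the stale update
        }
        c.entries[key] = v
    }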
@@ -208,7 +208,7 @@ func ProcessEpochParticipation(
}

// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
    beaconState state.BeaconState,
    bal *precompute.Balance,

@@ -265,7 +265,7 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
    finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
    increment := cfg.EffectiveBalanceIncrement
    factor := cfg.BaseRewardFactor
    baseRewardMultiplier := increment * factor / math.CachedSquareRoot(bal.ActiveCurrentEpoch)
    baseRewardMultiplier := increment * factor / math.IntegerSquareRoot(bal.ActiveCurrentEpoch)
    leak := helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch)

    // Modified in Altair and Bellatrix.
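CachedSquareRoot versus IntegerSquareRoot is the recurring substitution in these reward hunks: total active balance barely moves within an epoch, so memoizing the last square root avoids recomputing it on every call. Both reduce to an integer square root; a Newton-iteration version plus a hypothetical single-entry memo (the real Prysm implementations may differ):

    // isqrt returns floor(sqrt(n)) via integer Newton iteration.
    func isqrt(n uint64) uint64 {
        if n < 2 {
            return n
        }
        x := n
        y := (x + 1) / 2
        for y < x {
            x = y
            y = (x + n/x) / 2
        }
        return x
    }

    // Single-entry memo; illustrative only and not goroutine-safe.
    var lastN, lastRoot uint64

    func cachedSqrt(n uint64) uint64 {
        if n != 0 && n == lastN {
            return lastRoot
        }
        lastN, lastRoot = n, isqrt(n)
        return lastRoot
    }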
@@ -12,7 +12,6 @@ import (

// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.
//
// nolint:dupword
// Spec code:
// def process_sync_committee_updates(state: BeaconState) -> None:
//

@@ -46,7 +45,6 @@ func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconSt

// ProcessParticipationFlagUpdates processes participation flag updates by rotating current to previous.
//
// nolint:dupword
// Spec code:
// def process_participation_flag_updates(state: BeaconState) -> None:
//
@@ -58,5 +58,5 @@ func BaseRewardPerIncrement(activeBalance uint64) (uint64, error) {
        return 0, errors.New("active balance can't be 0")
    }
    cfg := params.BeaconConfig()
    return cfg.EffectiveBalanceIncrement * cfg.BaseRewardFactor / math.CachedSquareRoot(activeBalance), nil
    return cfg.EffectiveBalanceIncrement * cfg.BaseRewardFactor / math.IntegerSquareRoot(activeBalance), nil
}
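For a sense of scale, the quantity computed here is EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR / integer_sqrt(total_active_balance). With mainnet constants (increment 10^9 Gwei, factor 64) and a hypothetical 10,000,000 ETH of active balance (10^16 Gwei, whose integer square root is 10^8), that works out to 10^9 * 64 / 10^8 = 640 Gwei per effective-balance increment, shrinking as total stake grows.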
@@ -28,7 +28,6 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -7,7 +7,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"

@@ -61,7 +60,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
    }
    payload, err := body.Execution()
    switch {
    case errors.Is(err, consensus_types.ErrUnsupportedGetter):
    case errors.Is(err, blocks.ErrUnsupportedGetter):
        return false, nil
    case err != nil:
        return false, err
@@ -1,6 +1,7 @@
package blocks_test

import (
    "math/big"
    "testing"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"

@@ -609,7 +610,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
    random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    require.NoError(t, err)
    payload.PrevRandao = random
    wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)
    wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
    require.NoError(t, err)
    _, err = blocks.ProcessPayload(st, wrapped)
    require.NoError(t, err)

@@ -873,7 +874,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
        TransactionsRoot: make([]byte, fieldparams.RootLength),
        WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
        ExtraData:        make([]byte, 0),
    }, 0)
    }, big.NewInt(0))
}

func emptyPayload() *enginev1.ExecutionPayload {
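The mechanical change running through these test files is the second argument of the payload wrappers flipping between a plain 0 and big.NewInt(0): one side of the compare types the block value as a native integer, the other as a *big.Int, presumably because wei-denominated values can exceed uint64. At each call site the difference is only:

    wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)             // native-integer variant
    wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0)) // *big.Int variant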
@@ -1,6 +1,7 @@
package blocks_test

import (
    "math/big"
    "math/rand"
    "testing"

@@ -643,7 +644,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
    require.NoError(t, err)
    wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
    require.NoError(t, err)
    p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, 0)
    p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, big.NewInt(0))
    require.NoError(t, err)
    post, err := blocks.ProcessWithdrawals(st, p)
    if test.Control.ExpectedError {

@@ -1061,7 +1062,7 @@ func TestProcessWithdrawals(t *testing.T) {
    }
    st, err := prepareValidators(spb, test.Args)
    require.NoError(t, err)
    p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, 0)
    p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
    require.NoError(t, err)
    post, err := blocks.ProcessWithdrawals(st, p)
    if test.Control.ExpectedError {
@@ -9,6 +9,110 @@ import (
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// UpgradeToDeneb updates inputs a generic state to return the version Deneb state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
    epoch := time.CurrentEpoch(state)

    currentSyncCommittee, err := state.CurrentSyncCommittee()
    if err != nil {
        return nil, err
    }
    nextSyncCommittee, err := state.NextSyncCommittee()
    if err != nil {
        return nil, err
    }
    prevEpochParticipation, err := state.PreviousEpochParticipation()
    if err != nil {
        return nil, err
    }
    currentEpochParticipation, err := state.CurrentEpochParticipation()
    if err != nil {
        return nil, err
    }
    inactivityScores, err := state.InactivityScores()
    if err != nil {
        return nil, err
    }
    payloadHeader, err := state.LatestExecutionPayloadHeader()
    if err != nil {
        return nil, err
    }
    txRoot, err := payloadHeader.TransactionsRoot()
    if err != nil {
        return nil, err
    }
    wdRoot, err := payloadHeader.WithdrawalsRoot()
    if err != nil {
        return nil, err
    }
    wi, err := state.NextWithdrawalIndex()
    if err != nil {
        return nil, err
    }
    vi, err := state.NextWithdrawalValidatorIndex()
    if err != nil {
        return nil, err
    }
    summarires, err := state.HistoricalSummaries()
    if err != nil {
        return nil, err
    }

    s := &ethpb.BeaconStateDeneb{
        GenesisTime:           state.GenesisTime(),
        GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
        Slot:                  state.Slot(),
        Fork: &ethpb.Fork{
            PreviousVersion: state.Fork().CurrentVersion,
            CurrentVersion:  params.BeaconConfig().DenebForkVersion,
            Epoch:           epoch,
        },
        LatestBlockHeader:           state.LatestBlockHeader(),
        BlockRoots:                  state.BlockRoots(),
        StateRoots:                  state.StateRoots(),
        HistoricalRoots:             [][]byte{},
        Eth1Data:                    state.Eth1Data(),
        Eth1DataVotes:               state.Eth1DataVotes(),
        Eth1DepositIndex:            state.Eth1DepositIndex(),
        Validators:                  state.Validators(),
        Balances:                    state.Balances(),
        RandaoMixes:                 state.RandaoMixes(),
        Slashings:                   state.Slashings(),
        PreviousEpochParticipation:  prevEpochParticipation,
        CurrentEpochParticipation:   currentEpochParticipation,
        JustificationBits:           state.JustificationBits(),
        PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
        CurrentJustifiedCheckpoint:  state.CurrentJustifiedCheckpoint(),
        FinalizedCheckpoint:         state.FinalizedCheckpoint(),
        InactivityScores:            inactivityScores,
        CurrentSyncCommittee:        currentSyncCommittee,
        NextSyncCommittee:           nextSyncCommittee,
        LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
            ParentHash:       payloadHeader.ParentHash(),
            FeeRecipient:     payloadHeader.FeeRecipient(),
            StateRoot:        payloadHeader.StateRoot(),
            ReceiptsRoot:     payloadHeader.ReceiptsRoot(),
            LogsBloom:        payloadHeader.LogsBloom(),
            PrevRandao:       payloadHeader.PrevRandao(),
            BlockNumber:      payloadHeader.BlockNumber(),
            GasLimit:         payloadHeader.GasLimit(),
            GasUsed:          payloadHeader.GasUsed(),
            Timestamp:        payloadHeader.Timestamp(),
            ExtraData:        payloadHeader.ExtraData(),
            BaseFeePerGas:    payloadHeader.BaseFeePerGas(),
            BlockHash:        payloadHeader.BlockHash(),
            ExcessDataGas:    make([]byte, 32),
            TransactionsRoot: txRoot,
            WithdrawalsRoot:  wdRoot,
        },
        NextWithdrawalIndex:          wi,
        NextWithdrawalValidatorIndex: vi,
        HistoricalSummaries:          summarires,
    }

    return state_native.InitializeFromProtoUnsafeDeneb(s)
}
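UpgradeToDeneb follows the standard per-fork upgrade shape: copy every field forward, advance Fork.PreviousVersion/CurrentVersion, and extend the payload header with the one new Deneb field (ExcessDataGas, zeroed at the fork). A hedged sketch of where such a function is typically invoked during the epoch transition; the surrounding code and package placement are assumptions, not part of this diff:

    // Hypothetical fork-boundary hook.
    if time.CurrentEpoch(st) == params.BeaconConfig().DenebForkEpoch {
        st, err = deneb.UpgradeToDeneb(st) // package name is illustrative
        if err != nil {
            return nil, errors.Wrap(err, "could not upgrade state to deneb")
        }
    }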
// UpgradeToCapella updates a generic state to return the version Capella state.
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
    epoch := time.CurrentEpoch(state)
@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
}

// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
    currentEpoch := time.CurrentEpoch(state)
    nextEpoch := currentEpoch + 1

@@ -393,7 +393,6 @@ func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, er

// ProcessParticipationRecordUpdates rotates current/previous epoch attestations during epoch processing.
//
// nolint:dupword
// Spec pseudocode definition:
//
// def process_participation_record_updates(state: BeaconState) -> None:
@@ -14,7 +14,7 @@ type attesterRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator)
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, error)

// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
// This is an optimized version by passing in precomputed validator attesting records and and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
    state state.BeaconState,
    pBal *Balance,

@@ -72,7 +72,7 @@ func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Val
    prevEpoch := time.PrevEpoch(state)
    finalizedEpoch := state.FinalizedCheckpointEpoch()

    sqrtActiveCurrentEpoch := math.CachedSquareRoot(pBal.ActiveCurrentEpoch)
    sqrtActiveCurrentEpoch := math.IntegerSquareRoot(pBal.ActiveCurrentEpoch)
    for i, v := range vp {
        rewards[i], penalties[i] = attestationDelta(pBal, sqrtActiveCurrentEpoch, v, prevEpoch, finalizedEpoch)
    }

@@ -104,6 +104,7 @@ func attestationDelta(pBal *Balance, sqrtActiveCurrentEpoch uint64, v *Validator
    } else {
        rewardNumerator := br * (pBal.PrevEpochAttested / effectiveBalanceIncrement)
        r += rewardNumerator / currentEpochBalance

    }
    } else {
        p += br

@@ -160,7 +161,7 @@ func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Valida
    rewards := make([]uint64, numofVals)

    totalBalance := pBal.ActiveCurrentEpoch
    balanceSqrt := math.CachedSquareRoot(totalBalance)
    balanceSqrt := math.IntegerSquareRoot(totalBalance)
    // Balance square root cannot be 0, this prevents division by 0.
    if balanceSqrt == 0 {
        balanceSqrt = 1
@@ -15,11 +15,12 @@ const (
    BlockProcessed = iota + 1
    // ChainStarted is sent when enough validators are active to start proposing blocks.
    ChainStarted
    // deprecated: Initialized is sent when the internal beacon node's state is ready to be accessed.
    _
    // deprecated: Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
    _
    // Reorg is an event sent when the new head is not a descendant of the previous head.
    // Initialized is sent when the internal beacon node's state is ready to be accessed.
    Initialized
    // Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
    Synced
    // Reorg is an event sent when the new head state's slot after a block
    // transition is lower than its previous head state slot value.
    Reorg
    // FinalizedCheckpoint event.
    FinalizedCheckpoint
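Note how one side retires Initialized and Synced by replacing them with blank identifiers instead of deleting the lines: in an iota-numbered block, removing a constant would shift every later value, silently changing what Reorg and FinalizedCheckpoint mean to any consumer that stored or compared the raw numbers. In miniature:

    const (
        BlockProcessed = iota + 1 // 1
        ChainStarted              // 2
        _                         // 3 (was Initialized; slot kept so later values do not shift)
        _                         // 4 (was Synced)
        Reorg                     // 5
        FinalizedCheckpoint       // 6
    )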
@@ -48,7 +48,6 @@ go_test(
        "attestation_test.go",
        "beacon_committee_test.go",
        "block_test.go",
        "main_test.go",
        "randao_test.go",
        "rewards_penalties_test.go",
        "shuffle_test.go",

@@ -57,7 +56,6 @@ go_test(
        "weak_subjectivity_test.go",
    ],
    embed = [":go_default_library"],
    race = "on",
    shard_count = 2,
    deps = [
        "//beacon-chain/cache:go_default_library",
||||
@@ -183,11 +183,11 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
|
||||
currentSlot,
|
||||
)
|
||||
if attTime.Before(lowerBounds) {
|
||||
attReceivedTooLateCount.Inc()
|
||||
attReceivedTooEarlyCount.Inc()
|
||||
return attError
|
||||
}
|
||||
if attTime.After(upperBounds) {
|
||||
attReceivedTooEarlyCount.Inc()
|
||||
attReceivedTooLateCount.Inc()
|
||||
return attError
|
||||
}
|
||||
return nil
|
||||
|
||||
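The two hunks swap which counter increments on each bound, and only one pairing reads consistently with the names: attTime is the attestation slot's wall-clock time, so a time before the lower bound means the attestation's slot is already older than the acceptance window (it arrived too late), while a time past the upper bound means its slot is still in the future (it arrived too early). On that reading:

    // attTime < lowerBounds -> slot already in the past: received too late.
    // attTime > upperBounds -> slot still in the future: received too early.

One side of this compare therefore appears to correct swapped counters; which side is the fix cannot be determined from the hunk alone.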
@@ -382,10 +382,10 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco

// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
    committeeCache.Clear()
    proposerIndicesCache.Clear()
    syncCommitteeCache.Clear()
    balanceCache.Clear()
    committeeCache = cache.NewCommitteesCache()
    proposerIndicesCache = cache.NewProposerIndicesCache()
    syncCommitteeCache = cache.NewSyncCommittee()
    balanceCache = cache.NewEffectiveBalanceCache()
}

// computeCommittee returns the requested shuffled committee out of the total committees using
@@ -91,7 +91,6 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {

func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
    ClearCache()
    defer ClearCache()
    epoch := primitives.Epoch(1)
    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: 0, // Epoch 0.

@@ -102,8 +101,6 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
}

func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
    ClearCache()
    defer ClearCache()
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
    for i := 0; i < len(validators); i++ {
        var activationEpoch primitives.Epoch

@@ -121,6 +118,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    ClearCache()
    _, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
    require.NoError(t, err, "Failed to determine CommitteeAssignments")
    for _, ss := range proposerIndexToSlots {

@@ -190,7 +188,6 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
        },
    }

    defer ClearCache()
    for i, tt := range tests {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            ClearCache()

@@ -258,8 +255,6 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
}

func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
    ClearCache()
    defer ClearCache()
    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
    for i := 0; i < len(validators); i++ {

@@ -274,6 +269,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    ClearCache()
    epoch := primitives.Epoch(1)
    _, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
    require.NoError(t, err, "Failed to determine CommitteeAssignments")

@@ -380,7 +376,6 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
        },
    }

    defer ClearCache()
    for i, tt := range tests {
        ClearCache()
        require.NoError(t, state.SetSlot(tt.stateSlot))

@@ -395,7 +390,6 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {

func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
    ClearCache()
    defer ClearCache()
    validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
    validators := make([]*ethpb.Validator, validatorCount)
    indices := make([]primitives.ValidatorIndex, validatorCount)
@@ -1,13 +0,0 @@
package helpers

import (
    "os"
    "testing"
)

// run ClearCache before each test to prevent cross-test side effects
func TestMain(m *testing.M) {
    ClearCache()
    code := m.Run()
    os.Exit(code)
}
@@ -75,8 +75,6 @@ func TestTotalActiveBalance(t *testing.T) {
}

func TestTotalActiveBal_ReturnMin(t *testing.T) {
    ClearCache()
    defer ClearCache()
    tests := []struct {
        vCount int
    }{

@@ -98,8 +96,6 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}

func TestTotalActiveBalance_WithCache(t *testing.T) {
    ClearCache()
    defer ClearCache()
    tests := []struct {
        vCount    int
        wantCount int
@@ -25,7 +25,9 @@ var (
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
func IsCurrentPeriodSyncCommittee(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
    root, err := syncPeriodBoundaryRoot(st)
    if err != nil {
        return false, err

@@ -34,7 +36,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
    if err == cache.ErrNonExistingSyncCommitteeKey {
        val, err := st.ValidatorAtIndex(valIdx)
        if err != nil {
            return false, err
            return false, nil
        }
        committee, err := st.CurrentSyncCommittee()
        if err != nil {

@@ -71,7 +73,7 @@ func IsNextPeriodSyncCommittee(
    if err == cache.ErrNonExistingSyncCommitteeKey {
        val, err := st.ValidatorAtIndex(valIdx)
        if err != nil {
            return false, err
            return false, nil
        }
        committee, err := st.NextSyncCommittee()
        if err != nil {

@@ -98,7 +100,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
    if err == cache.ErrNonExistingSyncCommitteeKey {
        val, err := st.ValidatorAtIndex(valIdx)
        if err != nil {
            return nil, err
            return nil, nil
        }
        committee, err := st.CurrentSyncCommittee()
        if err != nil {

@@ -132,7 +134,7 @@ func NextPeriodSyncSubcommitteeIndices(
    if err == cache.ErrNonExistingSyncCommitteeKey {
        val, err := st.ValidatorAtIndex(valIdx)
        if err != nil {
            return nil, err
            return nil, nil
        }
        committee, err := st.NextSyncCommittee()
        if err != nil {
@@ -17,8 +17,6 @@ import (
)

func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -39,6 +37,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
	require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

+	ClearCache()
	r := [32]byte{'a'}
	require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

@@ -48,8 +47,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
}

func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -76,8 +73,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
}

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -99,13 +94,11 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

	ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
-	require.ErrorContains(t, "index 12390192 out of range", err)
+	require.NoError(t, err)
	require.Equal(t, false, ok)
}

func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -126,6 +119,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
	require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

+	ClearCache()
	r := [32]byte{'a'}
	require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

@@ -182,13 +176,11 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

	ok, err := IsNextPeriodSyncCommittee(state, 120391029)
-	require.ErrorContains(t, "index 120391029 out of range", err)
+	require.NoError(t, err)
	require.Equal(t, false, ok)
}

func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -209,6 +201,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
	require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

+	ClearCache()
	r := [32]byte{'a'}
	require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

@@ -218,8 +211,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -239,6 +230,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

+	root, err := syncPeriodBoundaryRoot(state)
	require.NoError(t, err)

@@ -260,7 +252,6 @@

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	ClearCache()
	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -282,13 +273,11 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

	index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
-	require.ErrorContains(t, "index 129301923 out of range", err)
+	require.NoError(t, err)
	require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -309,6 +298,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
	require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

+	ClearCache()
	r := [32]byte{'a'}
	require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

@@ -345,7 +335,6 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {

func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	ClearCache()
	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -367,7 +356,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

	index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
-	require.ErrorContains(t, "index 21093019 out of range", err)
+	require.NoError(t, err)
	require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

@@ -398,8 +387,6 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
}

func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
-	ClearCache()
-	defer ClearCache()
	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -425,6 +412,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
	require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
	require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

+	ClearCache()
	comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
	require.NoError(t, err)

@@ -285,7 +285,6 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (

// ComputeProposerIndex returns the index sampled by effective balance, which is used to calculate proposer.
//
-// nolint:dupword
// Spec pseudocode definition:
//
//	def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
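Since this hunk only touches the doc comment, the sampling algorithm itself is easy to lose sight of. Here is a compact, hedged Go sketch of the spec pseudocode; the helpers shuffledIndex, hashFn, and uint64ToBytes are illustrative stand-ins, not Prysm's actual functions:

package sketch

import "encoding/binary"

func uint64ToBytes(v uint64) []byte {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, v)
	return b
}

// computeProposerIndexSketch mirrors compute_proposer_index: walk the shuffled
// candidate list and accept a candidate with probability proportional to its
// effective balance (relative to MAX_EFFECTIVE_BALANCE).
func computeProposerIndexSketch(
	effectiveBalances []uint64, // indexed by validator index, in Gwei
	indices []uint64, // active validator indices
	seed [32]byte,
	shuffledIndex func(i, total uint64, seed [32]byte) uint64,
	hashFn func([]byte) [32]byte,
) uint64 {
	const maxRandomByte = 1<<8 - 1
	const maxEffectiveBalance = 32_000_000_000 // 32 ETH in Gwei (mainnet preset)
	total := uint64(len(indices))
	for i := uint64(0); ; i++ {
		candidate := indices[shuffledIndex(i%total, total, seed)]
		randomByte := hashFn(append(seed[:], uint64ToBytes(i/32)...))[i%32]
		if effectiveBalances[candidate]*maxRandomByte >= maxEffectiveBalance*uint64(randomByte) {
			return candidate
		}
	}
}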
@@ -179,7 +179,6 @@ func TestIsSlashableValidator_OK(t *testing.T) {
func TestBeaconProposerIndex_OK(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	ClearCache()
	defer ClearCache()
	c := params.BeaconConfig()
	c.MinGenesisActiveValidatorCount = 16384
	params.OverrideBeaconConfig(c)
@@ -223,7 +222,6 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
		},
	}

-	defer ClearCache()
	for _, tt := range tests {
		ClearCache()
		require.NoError(t, state.SetSlot(tt.slot))
@@ -236,7 +234,6 @@
func TestBeaconProposerIndex_BadState(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	ClearCache()
	defer ClearCache()
	c := params.BeaconConfig()
	c.MinGenesisActiveValidatorCount = 16384
	params.OverrideBeaconConfig(c)
@@ -348,7 +345,6 @@ func TestChurnLimit_OK(t *testing.T) {
		{validatorCount: 1000000, wantedChurn: 15 /* validatorCount/churnLimitQuotient */},
		{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
	}
-	defer ClearCache()
	for _, test := range tests {
		ClearCache()

@@ -520,7 +516,6 @@ func TestActiveValidatorIndices(t *testing.T) {
			want: []primitives.ValidatorIndex{0, 2, 3},
		},
	}
-	defer ClearCache()
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
	return epochStart && capellaEpoch
}

+// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
+// Spec code:
+// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
+func CanUpgradeToDeneb(slot primitives.Slot) bool {
+	epochStart := slots.IsEpochStart(slot)
+	DenebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
+	return epochStart && DenebEpoch
+}
+
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

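For intuition, the two predicates in CanUpgradeToDeneb only coincide on the first slot of the fork epoch, so the upgrade runs exactly once. A worked example with the config flattened to plain integers (mainnet's 32 slots per epoch and a hypothetical DenebForkEpoch of 100; both values are assumptions for illustration):

// canUpgradeToDenebSketch is arithmetic-equivalent to the function above;
// only slot 3200 (= 100 * 32) returns true under these example values.
func canUpgradeToDenebSketch(slot uint64) bool {
	const slotsPerEpoch = 32   // mainnet preset
	const denebForkEpoch = 100 // hypothetical fork epoch for the example
	epochStart := slot%slotsPerEpoch == 0
	denebEpoch := slot/slotsPerEpoch == denebForkEpoch
	return epochStart && denebEpoch
}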
@@ -23,7 +23,6 @@ go_library(
		"//beacon-chain/core/execution:go_default_library",
		"//beacon-chain/core/helpers:go_default_library",
		"//beacon-chain/core/time:go_default_library",
		"//beacon-chain/core/transition/interop:go_default_library",
		"//beacon-chain/core/validators:go_default_library",
		"//beacon-chain/state:go_default_library",
		"//beacon-chain/state/state-native:go_default_library",
@@ -46,6 +45,7 @@ go_library(
		"@com_github_pkg_errors//:go_default_library",
		"@com_github_prometheus_client_golang//prometheus:go_default_library",
		"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
+		"@com_github_protolambda_go_kzg//eth:go_default_library",
		"@com_github_sirupsen_logrus//:go_default_library",
		"@io_opencensus_go//trace:go_default_library",
	],

@@ -5,20 +5,15 @@ import (
	"context"
	"sync"

-	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
-	types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)

type nextSlotCache struct {
-	sync.Mutex
-	prevRoot  []byte
-	lastRoot  []byte
-	prevState state.BeaconState
-	lastState state.BeaconState
+	sync.RWMutex
+	root  []byte
+	state state.BeaconState
}

var (
@@ -34,22 +29,19 @@ var (
	})
)

-// NextSlotState returns the saved state for the given blockroot.
-// It returns the last updated state if it matches. Otherwise it returns the previously
-// updated state if it matches its root. If no root matches it returns nil
-func NextSlotState(root []byte, wantedSlot types.Slot) state.BeaconState {
-	nsc.Lock()
-	defer nsc.Unlock()
-	if bytes.Equal(root, nsc.lastRoot) && nsc.lastState.Slot() <= wantedSlot {
-		nextSlotCacheHit.Inc()
-		return nsc.lastState.Copy()
+// NextSlotState returns the saved state if the input root matches the root in `nextSlotCache`. Returns nil otherwise.
+// This is useful to check before processing slots. With a cache hit, it will return last processed state with slot plus
+// one advancement.
+func NextSlotState(_ context.Context, root []byte) (state.BeaconState, error) {
+	nsc.RLock()
+	defer nsc.RUnlock()
+	if !bytes.Equal(root, nsc.root) || bytes.Equal(root, []byte{}) {
+		nextSlotCacheMiss.Inc()
+		return nil, nil
	}
-	if bytes.Equal(root, nsc.prevRoot) && nsc.prevState.Slot() <= wantedSlot {
-		nextSlotCacheHit.Inc()
-		return nsc.prevState.Copy()
-	}
-	nextSlotCacheMiss.Inc()
-	return nil
+	nextSlotCacheHit.Inc()
+	// Returning copied state.
+	return nsc.state.Copy(), nil
}

// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
@@ -60,25 +52,13 @@ func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconSta
	copied := state.Copy()
	copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)
	if err != nil {
-		return errors.Wrap(err, "could not process slots")
+		return err
	}

	nsc.Lock()
	defer nsc.Unlock()

-	nsc.prevRoot = nsc.lastRoot
-	nsc.prevState = nsc.lastState
-	nsc.lastRoot = bytesutil.SafeCopyBytes(root)
-	nsc.lastState = copied
+	nsc.root = root
+	nsc.state = copied
	return nil
}

-// LastCachedState returns the last cached state and root in the cache
-func LastCachedState() ([]byte, state.BeaconState) {
-	nsc.Lock()
-	defer nsc.Unlock()
-	if nsc.lastState == nil {
-		return nil, nil
-	}
-	return bytesutil.SafeCopyBytes(nsc.lastRoot), nsc.lastState.Copy()
-}

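Putting the two halves of the simplified cache together, a typical caller looks roughly like this. This is a sketch against the signatures shown in the diff (the transition, state, and context imports are assumed), not code from the repository:

// fetchAdvancedState returns the cached one-slot-advanced copy when the root
// matches, and otherwise advances the parent state itself via ProcessSlots.
func fetchAdvancedState(ctx context.Context, parentRoot []byte, parentState state.BeaconState) (state.BeaconState, error) {
	cached, err := transition.NextSlotState(ctx, parentRoot)
	if err != nil {
		return nil, err
	}
	if cached != nil && !cached.IsNil() {
		return cached, nil // cache hit: already at parent slot + 1
	}
	return transition.ProcessSlots(ctx, parentState, parentState.Slot()+1)
}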
@@ -6,7 +6,6 @@ import (

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)
@@ -14,36 +13,18 @@ import (
func TestTrailingSlotState_RoundTrip(t *testing.T) {
	ctx := context.Background()
	r := []byte{'a'}
-	s := transition.NextSlotState(r, 0)
+	s, err := transition.NextSlotState(ctx, r)
+	require.NoError(t, err)
	require.Equal(t, nil, s)

	s, _ = util.DeterministicGenesisState(t, 1)
	require.NoError(t, transition.UpdateNextSlotCache(ctx, r, s))
-	s = transition.NextSlotState(r, 1)
+	s, err = transition.NextSlotState(ctx, r)
+	require.NoError(t, err)
	require.Equal(t, primitives.Slot(1), s.Slot())

-	lastRoot, lastState := transition.LastCachedState()
-	require.DeepEqual(t, r, lastRoot)
-	require.Equal(t, s.Slot(), lastState.Slot())
-
	require.NoError(t, transition.UpdateNextSlotCache(ctx, r, s))
-	s = transition.NextSlotState(r, 2)
+	s, err = transition.NextSlotState(ctx, r)
+	require.NoError(t, err)
	require.Equal(t, primitives.Slot(2), s.Slot())
-
-	lastRoot, lastState = transition.LastCachedState()
-	require.DeepEqual(t, r, lastRoot)
-	require.Equal(t, s.Slot(), lastState.Slot())
}
-
-func TestTrailingSlotState_StateAdvancedBeyondRequest(t *testing.T) {
-	ctx := context.Background()
-	r := []byte{'a'}
-	s := transition.NextSlotState(r, 0)
-	require.Equal(t, nil, s)
-
-	s, _ = util.DeterministicGenesisState(t, 1)
-	assert.NoError(t, s.SetSlot(2))
-	require.NoError(t, transition.UpdateNextSlotCache(ctx, r, s))
-	s = transition.NextSlotState(r, 1)
-	require.Equal(t, nil, s)
-}

@@ -147,15 +147,25 @@ func ProcessSlotsUsingNextSlotCache(
	ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlotsUsingNextSlotCache")
	defer span.End()

-	nextSlotState := NextSlotState(parentRoot, slot)
-	if nextSlotState != nil {
+	// Check whether the parent state has been advanced by 1 slot in next slot cache.
+	nextSlotState, err := NextSlotState(ctx, parentRoot)
+	if err != nil {
+		return nil, err
+	}
+	cachedStateExists := nextSlotState != nil && !nextSlotState.IsNil()
+	// If the next slot state is not nil (i.e. cache hit).
+	// We replace next slot state with parent state.
+	if cachedStateExists {
		parentState = nextSlotState
	}
-	if parentState.Slot() == slot {
+
+	// In the event our cached state has advanced our
+	// state to the desired slot, we exit early.
+	if cachedStateExists && parentState.Slot() == slot {
		return parentState, nil
	}

-	var err error
+	// Since next slot cache only advances state by 1 slot,
+	// we check if there's more slots that need to process.
	parentState, err = ProcessSlots(ctx, parentState, slot)
	if err != nil {
		return nil, errors.Wrap(err, "could not process slots")
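The control flow above reduces to a small decision: the cache can contribute at most one slot of progress, and ProcessSlots covers whatever distance remains. A distilled sketch of that early-exit rule (illustrative only, not repository code):

// remainingSlots captures the fast path in ProcessSlotsUsingNextSlotCache:
// a cache hit that already landed on the target slot leaves nothing to do;
// otherwise ProcessSlots must cover the gap.
func remainingSlots(cacheHit bool, stateSlot, target uint64) uint64 {
	if cacheHit && stateSlot == target {
		return 0
	}
	if stateSlot >= target {
		return 0 // ProcessSlots rejects targets in the past; nothing to do here
	}
	return target - stateSlot
}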
@@ -292,6 +302,14 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
				return nil, err
			}
		}
+
+		if time.CanUpgradeToDeneb(state.Slot()) {
+			state, err = capella.UpgradeToDeneb(state)
+			if err != nil {
+				tracing.AnnotateError(span, err)
+				return nil, err
+			}
+		}
	}

	if highestSlot < state.Slot() {

@@ -6,16 +6,18 @@ import (
	"fmt"

	"github.com/pkg/errors"
+	"github.com/protolambda/go-kzg/eth"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
	b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
	v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/crypto/bls"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
	"github.com/prysmaticlabs/prysm/v4/runtime/version"

	"go.opencensus.io/trace"
)

@@ -57,9 +59,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
	defer span.End()
	var err error

-	interop.WriteBlockToDisk(signed, false /* Has the block failed */)
-	interop.WriteStateToDisk(st)
-
	parentRoot := signed.Block().ParentRoot()
	st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
	if err != nil {
@@ -256,7 +255,7 @@ func ProcessOperationsNoVerifyAttsSigs(
		if err != nil {
			return nil, err
		}
-	case version.Altair, version.Bellatrix, version.Capella:
+	case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
		state, err = altairOperations(ctx, state, signedBeaconBlock)
		if err != nil {
			return nil, err
@@ -356,9 +355,48 @@ func ProcessBlockForStateRoot(
		return nil, errors.Wrap(err, "process_sync_aggregate failed")
	}

+	if signed.Block().Version() == version.Deneb {
+		err := ValidateBlobKzgs(ctx, signed.Block().Body())
+		if err != nil {
+			return nil, errors.Wrap(err, "could not validate blob kzgs")
+		}
+	}
+
	return state, nil
}

+// ValidateBlobKzgs validates the blob kzgs in the beacon block.
+//
+// Spec code:
+// def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
+//
+//	assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
+func ValidateBlobKzgs(ctx context.Context, body interfaces.ReadOnlyBeaconBlockBody) error {
+	_, span := trace.StartSpan(ctx, "core.state.ValidateBlobKzgs")
+	defer span.End()
+
+	payload, err := body.Execution()
+	if err != nil {
+		return errors.Wrap(err, "could not get execution payload from block")
+	}
+	blkKzgs, err := body.BlobKzgCommitments()
+	if err != nil {
+		return errors.Wrap(err, "could not get blob kzg commitments from block")
+	}
+	kzgs := make(eth.KZGCommitmentSequenceImpl, len(blkKzgs))
+	for i := range blkKzgs {
+		kzgs[i] = bytesutil.ToBytes48(blkKzgs[i])
+	}
+	txs, err := payload.Transactions()
+	if err != nil {
+		return errors.Wrap(err, "could not get transactions from payload")
+	}
+	if err := eth.VerifyKZGCommitmentsAgainstTransactions(txs, kzgs); err != nil {
+		return err
+	}
+	return nil
+}

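What verify_kzg_commitments_against_transactions enforces, in essence, is that every blob transaction's versioned hash matches one of the block's KZG commitments. A minimal sketch of the hashing rule it relies on; this mirrors EIP-4844's kzg_to_versioned_hash, and is not the go-kzg API itself:

package sketch

import "crypto/sha256"

// kzgToVersionedHash computes sha256(commitment) and stamps the first byte
// with VERSIONED_HASH_VERSION_KZG (0x01), per EIP-4844.
func kzgToVersionedHash(commitment [48]byte) [32]byte {
	h := sha256.Sum256(commitment[:])
	h[0] = 0x01
	return h
}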
// This calls altair block operations.
func altairOperations(
	ctx context.Context,

@@ -150,7 +150,7 @@ func TestSlashValidator_OK(t *testing.T) {

	maxBalance := params.BeaconConfig().MaxEffectiveBalance
	slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
-	assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")
+	assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isnt the expected amount")

	whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
	bal, err := state.BalanceAtIndex(proposer)

@@ -54,6 +54,11 @@
	// Fee recipients operations.
	FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
	RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
+
+	// Blob operations.
+	BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte) (*ethpb.BlobSidecars, error)
+	BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot) (*ethpb.BlobSidecars, error)
+
	// origin checkpoint sync support
	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
	BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -89,6 +94,10 @@
	SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
	SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
+
+	// Blob operations.
+	SaveBlobSidecar(ctx context.Context, blobSidecars *ethpb.BlobSidecars) error
+	DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error

	CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}

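A read-side sketch against the interface additions above; this is a hypothetical caller (the iface import alias and surrounding plumbing are assumptions), relying only on the method signatures shown in the hunk:

// countBlobs fetches the sidecar batch stored for a block root and reports
// how many blobs it carries; the kv implementation below returns ErrNotFound
// on a miss.
func countBlobs(ctx context.Context, db iface.ReadOnlyDatabase, root [32]byte) (int, error) {
	sidecars, err := db.BlobSidecarsByRoot(ctx, root)
	if err != nil {
		return 0, err
	}
	return len(sidecars.Sidecars), nil
}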
@@ -5,6 +5,7 @@ go_library(
	srcs = [
		"archived_point.go",
		"backup.go",
+		"blob.go",
		"blocks.go",
		"checkpoint.go",
		"deposit_contract.go",
@@ -74,6 +75,7 @@ go_test(
	srcs = [
		"archived_point_test.go",
		"backup_test.go",
+		"blob_test.go",
		"blocks_test.go",
		"checkpoint_test.go",
		"deposit_contract_test.go",
@@ -107,6 +109,7 @@ go_test(
		"//consensus-types/interfaces:go_default_library",
		"//consensus-types/primitives:go_default_library",
		"//encoding/bytesutil:go_default_library",
+		"//proto/engine/v1:go_default_library",
		"//proto/prysm/v1alpha1:go_default_library",
		"//proto/testing:go_default_library",
		"//testing/assert:go_default_library",

beacon-chain/db/kv/blob.go (new file, 155 lines)
@@ -0,0 +1,155 @@
package kv

import (
	"bytes"
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
//  1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
//  2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
//  3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
//     in the rotating keys buffer, we overwrite all elements for that slot.
func (s *Store) SaveBlobSidecar(ctx context.Context, blobSidecars *ethpb.BlobSidecars) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
	defer span.End()

	if blobSidecars == nil || len(blobSidecars.Sidecars) == 0 {
		return errors.New("nil or empty blob sidecars")
	}

	return s.db.Update(func(tx *bolt.Tx) error {
		encodedBlobSidecar, err := encode(ctx, blobSidecars)
		if err != nil {
			return err
		}
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		newKey := blobSidecarKey(blobSidecars.Sidecars[0])
		rotatingBufferPrefix := newKey[0:8]
		var replacingKey []byte
		for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
			if len(k) != 0 {
				replacingKey = k
				oldSlotBytes := replacingKey[8:16]
				oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
				if oldSlot >= blobSidecars.Sidecars[0].Slot {
					return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", blobSidecars.Sidecars[0].Slot, oldSlot)
				}
				break
			}
		}
		// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
		// store the blob by key and exit early.
		if len(replacingKey) == 0 {
			return bkt.Put(newKey, encodedBlobSidecar)
		}

		if err := bkt.Delete(replacingKey); err != nil {
			log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
		}
		return bkt.Put(newKey, encodedBlobSidecar)
	})
}

// BlobSidecarsByRoot retrieves the blobs given a beacon block root.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte) (*ethpb.BlobSidecars, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
	defer span.End()

	var enc []byte
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if bytes.HasSuffix(k, beaconBlockRoot[:]) {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	sidecars := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, sidecars); err != nil {
		return nil, err
	}
	return sidecars, nil
}

// BlobSidecarsBySlot retrieves sidecars from a slot.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot) (*ethpb.BlobSidecars, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
	defer span.End()

	var enc []byte
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
			if slotInKey == slot {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	sidecars := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, sidecars); err != nil {
		return nil, err
	}
	return sidecars, nil
}

// DeleteBlobSidecar deletes the blob sidecar entries stored under the given beacon block root, if any.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if bytes.HasSuffix(k, beaconBlockRoot[:]) {
				if err := bkt.Delete(k); err != nil {
					return err
				}
			}
		}
		return nil
	})
}

// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
	slotInRotatingBuffer := blob.Slot.ModSlot(maxSlotsToPersistBlobs)
	key := bytesutil.SlotToBytesBigEndian(slotInRotatingBuffer)
	key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
	key = append(key, blob.BlockRoot...)
	return key
}
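To make the key layout concrete, here is a worked example under assumed config values (SLOTS_PER_EPOCH = 32 and MinEpochsForBlobsSidecarsRequest = 4096, so the rotating buffer spans 131072 slots); the helper is a sketch, not the kv package's code:

package sketch

import "encoding/binary"

// blobKeySketch reproduces the documented layout with plain integers:
// bigEndian8(slot % maxSlots) ++ bigEndian8(slot) ++ blockRoot (48 bytes).
// A blob at slot 131073 therefore lands at rotating-buffer index 1.
func blobKeySketch(slot uint64, blockRoot [32]byte) []byte {
	const maxSlotsToPersistBlobs = 4096 * 32 // 131072 under the assumed values
	key := binary.BigEndian.AppendUint64(nil, slot%maxSlotsToPersistBlobs)
	key = binary.BigEndian.AppendUint64(key, slot)
	return append(key, blockRoot[:]...)
}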
beacon-chain/db/kv/blob_test.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package kv

import (
	"context"
	"crypto/rand"
	"encoding/binary"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestStore_BlobSidecars(t *testing.T) {
	ctx := context.Background()

	t.Run("empty", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 0)
		require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
	})
	t.Run("empty by root", func(t *testing.T) {
		db := setupDB(t)
		got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
		require.NoError(t, err)
		require.DeepEqual(t, (*ethpb.BlobSidecars)(nil), got)
	})
	t.Run("empty by slot", func(t *testing.T) {
		db := setupDB(t)
		got, err := db.BlobSidecarsBySlot(ctx, 1)
		require.NoError(t, err)
		require.DeepEqual(t, (*ethpb.BlobSidecars)(nil), got)
	})
	t.Run("save and retrieve by root (one)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 1)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, 1, len(scs.Sidecars))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.Equal(t, 1, len(got.Sidecars))
		require.DeepEqual(t, scs, got)
	})
	t.Run("save and retrieve by root (max)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs.Sidecars))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(got.Sidecars))
		require.DeepEqual(t, scs, got)
	})
	t.Run("save and retrieve by slot (one)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 1)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, 1, len(scs.Sidecars))
		got, err := db.BlobSidecarsBySlot(ctx, scs.Sidecars[0].Slot)
		require.NoError(t, err)
		require.Equal(t, 1, len(got.Sidecars))
		require.DeepEqual(t, scs, got)
	})
	t.Run("save and retrieve by slot (max)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs.Sidecars))
		got, err := db.BlobSidecarsBySlot(ctx, scs.Sidecars[0].Slot)
		require.NoError(t, err)
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(got.Sidecars))
		require.DeepEqual(t, scs, got)
	})
	t.Run("delete works", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs.Sidecars))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(got.Sidecars))
		require.DeepEqual(t, scs, got)
		require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot)))
		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.DeepEqual(t, (*ethpb.BlobSidecars)(nil), got)
	})
	t.Run("saving a blob with older slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs.Sidecars))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(got.Sidecars))
		require.DeepEqual(t, scs, got)
		require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
	})
	t.Run("saving a new blob for rotation", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs.Sidecars))
		oldBlockRoot := scs.Sidecars[0].BlockRoot
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(got.Sidecars))
		require.DeepEqual(t, scs, got)

		newScs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
		newScs.Sidecars[0].Slot = scs.Sidecars[0].Slot + newRetentionSlot
		require.NoError(t, db.SaveBlobSidecar(ctx, newScs))

		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
		require.NoError(t, err)
		require.DeepEqual(t, (*ethpb.BlobSidecars)(nil), got)

		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs.Sidecars[0].BlockRoot))
		require.NoError(t, err)
		require.DeepEqual(t, newScs, got)
	})
}

func generateBlobSidecars(t *testing.T, n uint64) *ethpb.BlobSidecars {
	blobSidecars := make([]*ethpb.BlobSidecar, n)
	for i := uint64(0); i < n; i++ {
		blobSidecars[i] = generateBlobSidecar(t)
	}
	return &ethpb.BlobSidecars{
		Sidecars: blobSidecars,
	}
}

func generateBlobSidecar(t *testing.T) *ethpb.BlobSidecar {
	blockRoot := make([]byte, 32)
	_, err := rand.Read(blockRoot)
	require.NoError(t, err)
	index := make([]byte, 8)
	_, err = rand.Read(index)
	require.NoError(t, err)
	slot := make([]byte, 8)
	_, err = rand.Read(slot)
	require.NoError(t, err)
	blockParentRoot := make([]byte, 32)
	_, err = rand.Read(blockParentRoot)
	require.NoError(t, err)
	proposerIndex := make([]byte, 8)
	_, err = rand.Read(proposerIndex)
	require.NoError(t, err)
	blobData := make([]byte, 131072)
	_, err = rand.Read(blobData)
	require.NoError(t, err)
	blob := &enginev1.Blob{
		Data: blobData,
	}
	kzgCommitment := make([]byte, 48)
	_, err = rand.Read(kzgCommitment)
	require.NoError(t, err)
	kzgProof := make([]byte, 48)
	_, err = rand.Read(kzgProof)
	require.NoError(t, err)

	return &ethpb.BlobSidecar{
		BlockRoot:       blockRoot,
		Index:           binary.LittleEndian.Uint64(index),
		Slot:            primitives.Slot(binary.LittleEndian.Uint64(slot)),
		BlockParentRoot: blockParentRoot,
		ProposerIndex:   primitives.ValidatorIndex(binary.LittleEndian.Uint64(proposerIndex)),
		Blob:            blob,
		KzgCommitment:   kzgCommitment,
		KzgProof:        kzgProof,
	}
}
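The final rotation sub-test works because of a simple congruence: adding exactly one buffer length to a slot leaves its rotating index unchanged, so the new sidecar collides with, and evicts, the old entry. In sketch form, with the same assumed constants as the earlier key example:

// rotatesOntoSameIndex is the invariant the rotation test exercises:
// (slot + maxSlots) % maxSlots == slot % maxSlots for any slot.
func rotatesOntoSameIndex(slot uint64) bool {
	const maxSlots = 4096 * 32 // assumed retention window in slots
	return (slot+maxSlots)%maxSlots == slot%maxSlots // always true
}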
@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
		if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
		}
+	case hasDenebKey(enc):
+		rawBlock = &ethpb.SignedBeaconBlockDeneb{}
+		if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
+			return nil, err
+		}
+	case hasDenebBlindKey(enc):
+		rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
+		if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
+			return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
+		}
	default:
		// Marshal block bytes to phase 0 beacon block.
		rawBlock = &ethpb.SignedBeaconBlock{}
@@ -854,6 +864,8 @@ func marshalBlockFull(
		return nil, err
	}
	switch blk.Version() {
+	case version.Deneb:
+		return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
	case version.Bellatrix:
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
		return nil, errors.Wrap(err, "could not marshal blinded block")
	}
	switch blk.Version() {
+	case version.Deneb:
+		return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
	case version.Bellatrix:
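All of the marshal/unmarshal changes above follow one framing convention: a short version-key prefix is prepended to the SSZ bytes before snappy compression, and decoding dispatches on that prefix. A sketch of the detection helper the switch relies on; the actual denebKey/denebBlindKey values live in the kv package and are not shown in this diff:

import "bytes"

// hasKeySketch returns true when the (snappy-decoded) payload starts with the
// given version-key prefix; the real hasDenebKey/hasDenebBlindKey helpers
// follow this shape.
func hasKeySketch(enc, key []byte) bool {
	return len(enc) >= len(key) && bytes.Equal(enc[:len(key)], key)
}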
@@ -3,7 +3,6 @@ package kv
import (
	"context"
	"testing"
-	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
@@ -543,7 +542,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
		require.NoError(t, err)
		assert.Equal(t, 150, len(retrieved))
		for _, b := range retrieved {
-			assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpected block slot %d", b.Block().Slot())
+			assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpect block slot %d", b.Block().Slot())
		}
	})
}
@@ -911,25 +910,25 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
	ids := []primitives.ValidatorIndex{0, 0, 0}
	regs := []*ethpb.ValidatorRegistrationV1{{}, {}, {}, {}}
	require.ErrorContains(t, "ids and registrations must be the same length", db.SaveRegistrationsByValidatorIDs(ctx, ids, regs))
-	timestamp := time.Now().Unix()

	ids = []primitives.ValidatorIndex{0, 1, 2}
	regs = []*ethpb.ValidatorRegistrationV1{
		{
			FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
			GasLimit:     1,
-			Timestamp:    uint64(timestamp),
+			Timestamp:    2,
			Pubkey:       bytesutil.PadTo([]byte("b"), 48),
		},
		{
			FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
			GasLimit:     3,
-			Timestamp:    uint64(timestamp),
+			Timestamp:    4,
			Pubkey:       bytesutil.PadTo([]byte("d"), 48),
		},
		{
			FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
			GasLimit:     5,
-			Timestamp:    uint64(timestamp),
+			Timestamp:    6,
			Pubkey:       bytesutil.PadTo([]byte("f"), 48),
		},
	}
@@ -939,7 +938,7 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
	require.DeepEqual(t, &ethpb.ValidatorRegistrationV1{
		FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
		GasLimit:     1,
-		Timestamp:    uint64(timestamp),
+		Timestamp:    2,
		Pubkey:       bytesutil.PadTo([]byte("b"), 48),
	}, f)
	f, err = db.RegistrationByValidatorID(ctx, 1)
@@ -947,7 +946,7 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
	require.DeepEqual(t, &ethpb.ValidatorRegistrationV1{
		FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
		GasLimit:     3,
-		Timestamp:    uint64(timestamp),
+		Timestamp:    4,
		Pubkey:       bytesutil.PadTo([]byte("d"), 48),
	}, f)
	f, err = db.RegistrationByValidatorID(ctx, 2)
@@ -955,7 +954,7 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
	require.DeepEqual(t, &ethpb.ValidatorRegistrationV1{
		FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
		GasLimit:     5,
-		Timestamp:    uint64(timestamp),
+		Timestamp:    6,
		Pubkey:       bytesutil.PadTo([]byte("f"), 48),
	}, f)
	_, err = db.RegistrationByValidatorID(ctx, 3)

Some files were not shown because too many files have changed in this diff.