mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 05:47:59 -05:00
Compare commits
350 Commits
v5.0.0-rc.
...
testRebase
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fb53a3817e | ||
|
|
577838ebfb | ||
|
|
8efd97d527 | ||
|
|
d682840291 | ||
|
|
59726e8ea7 | ||
|
|
279a0a1cb7 | ||
|
|
8af287765b | ||
|
|
ccc49e0c36 | ||
|
|
2ce5b45752 | ||
|
|
f6ef7d51c1 | ||
|
|
3f43586628 | ||
|
|
d82d1c33ac | ||
|
|
594a8fffff | ||
|
|
f6f9ce74a0 | ||
|
|
e5c39be32c | ||
|
|
c942a79093 | ||
|
|
915041c485 | ||
|
|
1b48083776 | ||
|
|
b3c54ec2e6 | ||
|
|
970908723f | ||
|
|
faa26117de | ||
|
|
9d3472237f | ||
|
|
6f0b0ff6b4 | ||
|
|
f7a4636c40 | ||
|
|
e99ef55928 | ||
|
|
ecf41951fb | ||
|
|
4ab8721f78 | ||
|
|
83e1c5b9df | ||
|
|
085fd6283b | ||
|
|
663ac056ab | ||
|
|
89ada9f8a6 | ||
|
|
c3cdb36481 | ||
|
|
0e9f683562 | ||
|
|
f3e5e7f495 | ||
|
|
264547bb4a | ||
|
|
f03726f7b3 | ||
|
|
a274bdfd3c | ||
|
|
4a83ab6d69 | ||
|
|
38d5a2bb13 | ||
|
|
c7def8ae77 | ||
|
|
2083c70ccf | ||
|
|
3301b5107a | ||
|
|
ec3164474a | ||
|
|
777f430ae8 | ||
|
|
b28fc2965b | ||
|
|
4392b80154 | ||
|
|
0b5f5f58d5 | ||
|
|
9a9323d13a | ||
|
|
3ad99f64b6 | ||
|
|
fd95f27356 | ||
|
|
36411c4f2a | ||
|
|
8b4b3a269b | ||
|
|
2f76ba542f | ||
|
|
637cbc88e8 | ||
|
|
fadff022a0 | ||
|
|
05784a6c28 | ||
|
|
5a48e002dd | ||
|
|
d6f86269a4 | ||
|
|
422438f515 | ||
|
|
e5b25071f9 | ||
|
|
498ee635e1 | ||
|
|
c238c00630 | ||
|
|
3eacc37831 | ||
|
|
5267b4b4d4 | ||
|
|
ec84a1b49c | ||
|
|
afb7383225 | ||
|
|
a00b40fa81 | ||
|
|
365c6252ba | ||
|
|
7c81c7da90 | ||
|
|
f8950c8c40 | ||
|
|
18be899aef | ||
|
|
9dc3b645c4 | ||
|
|
318561999d | ||
|
|
ae5b0b4391 | ||
|
|
74b5f6ecf2 | ||
|
|
aea2a469cc | ||
|
|
7a394062e1 | ||
|
|
8070fc8ece | ||
|
|
d6aeaf77b3 | ||
|
|
40434ac209 | ||
|
|
0aab919d7c | ||
|
|
a8ecf5d118 | ||
|
|
8c0d6b27d0 | ||
|
|
2e6f1de29a | ||
|
|
657750b803 | ||
|
|
af5eb82217 | ||
|
|
84e7f33fd9 | ||
|
|
ca83d29eef | ||
|
|
0d49f6c142 | ||
|
|
041e81aad2 | ||
|
|
028504ae9a | ||
|
|
91c55c6880 | ||
|
|
78cf75a0ed | ||
|
|
fa370724f1 | ||
|
|
5edc64d88c | ||
|
|
7898e65d4e | ||
|
|
6d63dbe1af | ||
|
|
eab9daf5f5 | ||
|
|
4becd7b375 | ||
|
|
f230a6af58 | ||
|
|
539b981ac3 | ||
|
|
aad29ff9fc | ||
|
|
4722446caf | ||
|
|
b8aad84285 | ||
|
|
5f0d6074d6 | ||
|
|
9d6a2f5390 | ||
|
|
490ddbf782 | ||
|
|
adc875b20d | ||
|
|
8cd249c1c8 | ||
|
|
305d5850e7 | ||
|
|
df3a9f218d | ||
|
|
ae451a3a02 | ||
|
|
17561a6576 | ||
|
|
b842b7ea01 | ||
|
|
9bbe12e28c | ||
|
|
0674cf64cc | ||
|
|
3413d05b34 | ||
|
|
070a765d24 | ||
|
|
8ac1647436 | ||
|
|
dfe31c9242 | ||
|
|
b7866be3a9 | ||
|
|
8413660d5f | ||
|
|
e037491756 | ||
|
|
ea2624b5ab | ||
|
|
1b40f941cf | ||
|
|
57830435d7 | ||
|
|
44d850de51 | ||
|
|
b08e691127 | ||
|
|
968e82b02d | ||
|
|
de04ce8329 | ||
|
|
5efecff631 | ||
|
|
3ab759e163 | ||
|
|
836d369c6c | ||
|
|
568273453b | ||
|
|
7a4ecb6060 | ||
|
|
82f0ea5b11 | ||
|
|
6fddd13cb2 | ||
|
|
43c7659d18 | ||
|
|
2d15e53dab | ||
|
|
2f2152e039 | ||
|
|
2542189efc | ||
|
|
8e6d39a44b | ||
|
|
c35889d4c6 | ||
|
|
10dedd5ced | ||
|
|
d2966a4c5b | ||
|
|
62b5c43d87 | ||
|
|
b04baa93cd | ||
|
|
2e84208169 | ||
|
|
2265af58ae | ||
|
|
4d190c41cc | ||
|
|
0fbb27d8e3 | ||
|
|
3df3e84270 | ||
|
|
30cc23c5de | ||
|
|
9befb6bd06 | ||
|
|
8a12b78684 | ||
|
|
46168607e8 | ||
|
|
1272b9e186 | ||
|
|
fcbe19445a | ||
|
|
2b4dffa87d | ||
|
|
49a6d02e12 | ||
|
|
2b06dfd4a3 | ||
|
|
6e81b4e84b | ||
|
|
0de1282e1c | ||
|
|
e3db52ca1f | ||
|
|
c5a36d4c70 | ||
|
|
e28b6695ba | ||
|
|
de177f74fb | ||
|
|
e4310aef73 | ||
|
|
d71079e1d8 | ||
|
|
c08d2f36b0 | ||
|
|
839a80e339 | ||
|
|
a35535043e | ||
|
|
323dd7b22d | ||
|
|
102128ca2e | ||
|
|
f3dd75a2c4 | ||
|
|
0869814a0e | ||
|
|
41edee9fe9 | ||
|
|
2fa3694746 | ||
|
|
e9606b3635 | ||
|
|
ed7c4bb6a7 | ||
|
|
c93fea4ec4 | ||
|
|
aa847991e0 | ||
|
|
5f1b903bdf | ||
|
|
49f3531aed | ||
|
|
9b2934f1f6 | ||
|
|
26355768a0 | ||
|
|
80bff0dc2d | ||
|
|
c312a88aa3 | ||
|
|
625818d556 | ||
|
|
2c5a2e8ec7 | ||
|
|
ae16d5f52c | ||
|
|
d69be8a766 | ||
|
|
8df62a537b | ||
|
|
bf5e667351 | ||
|
|
0e5c2bd18e | ||
|
|
3233e64ace | ||
|
|
751117a308 | ||
|
|
8d9024f01f | ||
|
|
a9862f32f3 | ||
|
|
c8d6f47749 | ||
|
|
a6f134e48e | ||
|
|
fdbb5136d9 | ||
|
|
2c66918594 | ||
|
|
a0dac292ff | ||
|
|
75857e7177 | ||
|
|
0369f70b0b | ||
|
|
276b97530f | ||
|
|
d5daf49a9a | ||
|
|
feb16ae4aa | ||
|
|
219301339c | ||
|
|
aec349f75a | ||
|
|
5f909caedf | ||
|
|
ba6dff3adb | ||
|
|
8cd05f098b | ||
|
|
425f5387fa | ||
|
|
f2ce115ade | ||
|
|
090a3e1ded | ||
|
|
c0acb7d352 | ||
|
|
0d6070e6fc | ||
|
|
bd00f851f0 | ||
|
|
1a0c07deec | ||
|
|
04f231a400 | ||
|
|
be1bfcce63 | ||
|
|
8cf5d79852 | ||
|
|
f7912e7c20 | ||
|
|
caa8be5dd1 | ||
|
|
0c15a30a34 | ||
|
|
7bce1c0714 | ||
|
|
d1084cbe48 | ||
|
|
2cc3f69a3f | ||
|
|
a861489a83 | ||
|
|
0e1c585f7d | ||
|
|
9df20e616c | ||
|
|
53fdd2d062 | ||
|
|
2b4bb5d890 | ||
|
|
38f208d70d | ||
|
|
65b90abdda | ||
|
|
f3b49d4eaf | ||
|
|
5b1da7353c | ||
|
|
9f17e65860 | ||
|
|
9b2d53b0d1 | ||
|
|
d6f9196707 | ||
|
|
1b0e09369e | ||
|
|
12482eeb40 | ||
|
|
acc307b959 | ||
|
|
c1d75c295a | ||
|
|
fad118cb04 | ||
|
|
cdd1d819df | ||
|
|
97edffaff5 | ||
|
|
6de7df6b9d | ||
|
|
14d7416c16 | ||
|
|
6782df917a | ||
|
|
3d2230223f | ||
|
|
b008a6422d | ||
|
|
d19365507f | ||
|
|
c05e39a668 | ||
|
|
63c2b3563a | ||
|
|
a6e86c6731 | ||
|
|
32fb183392 | ||
|
|
cade09ba0b | ||
|
|
f85ddfe265 | ||
|
|
3b97094ea4 | ||
|
|
acdbf7c491 | ||
|
|
1cc1effd75 | ||
|
|
f7f1d249f2 | ||
|
|
02abb3e3c0 | ||
|
|
2255c8b287 | ||
|
|
27ecf448a7 | ||
|
|
e243f04e44 | ||
|
|
fca1adbad7 | ||
|
|
b692722ddf | ||
|
|
c4f6020677 | ||
|
|
d779e65d4e | ||
|
|
357211b7d9 | ||
|
|
2dd48343a2 | ||
|
|
7f931bf65b | ||
|
|
fda4589251 | ||
|
|
34593d34d4 | ||
|
|
4d18e590ed | ||
|
|
ec8b67cb12 | ||
|
|
a817aa0a8d | ||
|
|
d76f55e97a | ||
|
|
2de21eb22f | ||
|
|
58b8c31c93 | ||
|
|
f343333880 | ||
|
|
8e0b1b7e1f | ||
|
|
65f71b3a48 | ||
|
|
9fcb9b86af | ||
|
|
aa63c4e7f2 | ||
|
|
d6ae838bbf | ||
|
|
d49afb370c | ||
|
|
4d3a6d84d2 | ||
|
|
9c5d16e161 | ||
|
|
4731304187 | ||
|
|
02cbcf8545 | ||
|
|
4e10734ae4 | ||
|
|
e19c99c3e2 | ||
|
|
697bcd418c | ||
|
|
ec7949fa4b | ||
|
|
cb8eb4e955 | ||
|
|
800f3b572f | ||
|
|
9d3af41acb | ||
|
|
07a0a95ee7 | ||
|
|
9e7352704c | ||
|
|
2616de1eb1 | ||
|
|
b2e3c29ab3 | ||
|
|
83538251aa | ||
|
|
2442280e37 | ||
|
|
4608569495 | ||
|
|
20d013a30b | ||
|
|
b0a2115a26 | ||
|
|
102518e106 | ||
|
|
e49ed4d554 | ||
|
|
21775eed52 | ||
|
|
ee9274a9bc | ||
|
|
ef21d3adf8 | ||
|
|
b6ce6c2eba | ||
|
|
b3caaa9acc | ||
|
|
d6fb8c29c9 | ||
|
|
3df7a1f067 | ||
|
|
4c3dbae3c0 | ||
|
|
68b78dd520 | ||
|
|
2e2ef4a179 | ||
|
|
b61d17731e | ||
|
|
6d3c6a6331 | ||
|
|
f1615c4c88 | ||
|
|
87b127365f | ||
|
|
5215ed03fd | ||
|
|
0453d18395 | ||
|
|
0132c1b17d | ||
|
|
d9d2ee75de | ||
|
|
ddb321e0ce | ||
|
|
5735379963 | ||
|
|
1d5a09c05d | ||
|
|
70e1b11aeb | ||
|
|
e100fb0c08 | ||
|
|
789c3f8078 | ||
|
|
0b261cba5e | ||
|
|
7a9608ea20 | ||
|
|
f795e09ecf | ||
|
|
e6a6365bdd | ||
|
|
4c66e4d060 | ||
|
|
daad29d0de | ||
|
|
9f67ad9496 | ||
|
|
0ee0653a15 | ||
|
|
4ff91bebf8 | ||
|
|
f85e027141 | ||
|
|
e09ae75c9f | ||
|
|
cb80d5ad32 | ||
|
|
24b029bbef |
7
.bazelrc
7
.bazelrc
@@ -6,6 +6,12 @@ import %workspace%/build/bazelrc/debug.bazelrc
|
||||
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
|
||||
import %workspace%/build/bazelrc/performance.bazelrc
|
||||
|
||||
# hermetic_cc_toolchain v3.0.1 required changes.
|
||||
common --enable_platform_specific_config
|
||||
build:linux --sandbox_add_mount_pair=/tmp
|
||||
build:macos --sandbox_add_mount_pair=/var/tmp
|
||||
build:windows --sandbox_add_mount_pair=C:\Temp
|
||||
|
||||
# E2E run with debug gotag
|
||||
test:e2e --define gotags=debug
|
||||
|
||||
@@ -16,6 +22,7 @@ coverage --define=coverage_enabled=1
|
||||
build --workspace_status_command=./hack/workspace_status.sh
|
||||
|
||||
build --define blst_disabled=false
|
||||
build --compilation_mode=opt
|
||||
run --define blst_disabled=false
|
||||
|
||||
build:blst_disabled --define blst_disabled=true
|
||||
|
||||
@@ -1 +1 @@
|
||||
7.0.0
|
||||
7.1.0
|
||||
|
||||
@@ -12,8 +12,7 @@
|
||||
build:remote-cache --remote_download_minimal
|
||||
build:remote-cache --remote_build_event_upload=minimal
|
||||
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
|
||||
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
|
||||
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --remote_local_fallback
|
||||
build:remote-cache --experimental_remote_cache_async
|
||||
build:remote-cache --experimental_remote_merkle_tree_cache
|
||||
|
||||
2
.github/actions/gomodtidy/Dockerfile
vendored
2
.github/actions/gomodtidy/Dockerfile
vendored
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.21-alpine
|
||||
FROM golang:1.22-alpine
|
||||
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
|
||||
4
.github/workflows/fuzz.yml
vendored
4
.github/workflows/fuzz.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.21.5'
|
||||
go-version: '1.22.3'
|
||||
- id: list
|
||||
uses: shogo82148/actions-go-fuzz/list@v0
|
||||
with:
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.21.5'
|
||||
go-version: '1.22.3'
|
||||
- uses: shogo82148/actions-go-fuzz/run@v0
|
||||
with:
|
||||
packages: ${{ matrix.package }}
|
||||
|
||||
12
.github/workflows/go.yml
vendored
12
.github/workflows/go.yml
vendored
@@ -28,14 +28,14 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Go 1.21
|
||||
- name: Set up Go 1.22
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.21.5'
|
||||
go-version: '1.22.3'
|
||||
- name: Run Gosec Security Scanner
|
||||
run: | # https://github.com/securego/gosec/issues/469
|
||||
export PATH=$PATH:$(go env GOPATH)/bin
|
||||
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
|
||||
go install github.com/securego/gosec/v2/cmd/gosec@v2.19.0
|
||||
gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
|
||||
|
||||
lint:
|
||||
@@ -45,10 +45,10 @@ jobs:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Go 1.21
|
||||
- name: Set up Go 1.22
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.21.5'
|
||||
go-version: '1.22.3'
|
||||
id: go
|
||||
|
||||
- name: Golangci-lint
|
||||
@@ -64,7 +64,7 @@ jobs:
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.21.5'
|
||||
go-version: '1.22.3'
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -41,3 +41,6 @@ jwt.hex
|
||||
|
||||
# manual testing
|
||||
tmp
|
||||
|
||||
# spectest coverage reports
|
||||
report.txt
|
||||
|
||||
@@ -6,7 +6,7 @@ run:
|
||||
- proto
|
||||
- tools/analyzers
|
||||
timeout: 10m
|
||||
go: '1.21.5'
|
||||
go: '1.22.3'
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
@@ -34,7 +34,6 @@ linters:
|
||||
- dogsled
|
||||
- dupl
|
||||
- durationcheck
|
||||
- errorlint
|
||||
- exhaustive
|
||||
- exhaustruct
|
||||
- forbidigo
|
||||
@@ -52,6 +51,7 @@ linters:
|
||||
- gofumpt
|
||||
- gomnd
|
||||
- gomoddirectives
|
||||
- gosec
|
||||
- inamedparam
|
||||
- interfacebloat
|
||||
- ireturn
|
||||
|
||||
@@ -224,6 +224,7 @@ nogo(
|
||||
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/defers:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/directive:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
|
||||
# fieldalignment disabled
|
||||
#"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
|
||||
|
||||
842
MODULE.bazel.lock
generated
842
MODULE.bazel.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,7 @@
|
||||
|
||||
[](https://buildkite.com/prysmatic-labs/prysm)
|
||||
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
|
||||
[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
|
||||
[](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
|
||||
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
|
||||
[](https://discord.gg/prysmaticlabs)
|
||||
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
|
||||
|
||||
103
WORKSPACE
103
WORKSPACE
@@ -16,34 +16,20 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
|
||||
|
||||
rules_pkg_dependencies()
|
||||
|
||||
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
|
||||
|
||||
http_archive(
|
||||
name = "hermetic_cc_toolchain",
|
||||
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
|
||||
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
|
||||
sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
|
||||
urls = [
|
||||
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
|
||||
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
|
||||
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
|
||||
],
|
||||
)
|
||||
|
||||
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
|
||||
|
||||
# Temporarily use a nightly build until 0.12.0 is released.
|
||||
# See: https://github.com/prysmaticlabs/prysm/issues/13130
|
||||
zig_toolchains(
|
||||
host_platform_sha256 = {
|
||||
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
|
||||
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
|
||||
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
|
||||
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
|
||||
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
|
||||
},
|
||||
url_formats = [
|
||||
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
|
||||
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
|
||||
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
|
||||
],
|
||||
version = "0.12.0-dev.1349+fa022d1ec",
|
||||
)
|
||||
zig_toolchains()
|
||||
|
||||
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
|
||||
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
|
||||
@@ -81,10 +67,10 @@ bazel_skylib_workspace()
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
|
||||
integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -113,6 +99,13 @@ http_archive(
|
||||
url = "https://github.com/GoogleContainerTools/distroless/archive/9dc924b9fe812eec2fa0061824dcad39eb09d0d6.tar.gz", # 2024-01-24
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "aspect_bazel_lib",
|
||||
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
|
||||
strip_prefix = "bazel-lib-2.5.0",
|
||||
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
|
||||
)
|
||||
|
||||
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
|
||||
|
||||
aspect_bazel_lib_dependencies()
|
||||
@@ -121,9 +114,9 @@ aspect_bazel_lib_register_toolchains()
|
||||
|
||||
http_archive(
|
||||
name = "rules_oci",
|
||||
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
|
||||
strip_prefix = "rules_oci-1.3.4",
|
||||
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
|
||||
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
|
||||
strip_prefix = "rules_oci-1.7.4",
|
||||
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
|
||||
)
|
||||
|
||||
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
|
||||
@@ -144,17 +137,13 @@ http_archive(
|
||||
# Expose internals of go_test for custom build transitions.
|
||||
"//third_party:io_bazel_rules_go_test.patch",
|
||||
],
|
||||
sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
|
||||
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
load("//:distroless_deps.bzl", "distroless_deps")
|
||||
|
||||
distroless_deps()
|
||||
|
||||
# Override default import in rules_go with special patch until
|
||||
# https://github.com/gogo/protobuf/pull/582 is merged.
|
||||
git_repository(
|
||||
@@ -193,10 +182,14 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.21.6",
|
||||
go_version = "1.22.4",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
load("//:distroless_deps.bzl", "distroless_deps")
|
||||
|
||||
distroless_deps()
|
||||
|
||||
http_archive(
|
||||
name = "io_kubernetes_build",
|
||||
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
|
||||
@@ -234,9 +227,7 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.4.0-beta.7"
|
||||
|
||||
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
|
||||
consensus_spec_version = "v1.5.0-alpha.3"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -252,8 +243,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
|
||||
integrity = "sha256-+byv+GUOQytex5GgtjBGVoNDseJZbiBdAjEtlgCbjEo=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -268,8 +259,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
|
||||
integrity = "sha256-JJUy/jT1h3kGQkinTuzL7gMOA1+qgmPgJXVrYuH63Cg=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -284,8 +275,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
|
||||
integrity = "sha256-T2VM4Qd0SwgGnTjWxjOX297DqEsovO9Ueij1UEJy48Y=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -299,7 +290,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
|
||||
integrity = "sha256-OP9BCBcQ7i+93bwj7ktY8pZ5uWsGjgTe4XTp7BDhX+I=",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
@@ -335,36 +326,20 @@ filegroup(
|
||||
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "goerli_testnet",
|
||||
build_file_content = """
|
||||
filegroup(
|
||||
name = "configs",
|
||||
srcs = [
|
||||
"prater/config.yaml",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
|
||||
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
|
||||
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "holesky_testnet",
|
||||
build_file_content = """
|
||||
filegroup(
|
||||
name = "configs",
|
||||
srcs = [
|
||||
"custom_config_data/config.yaml",
|
||||
"metadata/config.yaml",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "5f4be6fd088683ea9db45c863b9c5a1884422449e5b59fd2d561d3ba0f73ffd9",
|
||||
strip_prefix = "holesky-9d9aabf2d4de51334ee5fed6c79a4d55097d1a43",
|
||||
url = "https://github.com/eth-clients/holesky/archive/9d9aabf2d4de51334ee5fed6c79a4d55097d1a43.tar.gz", # 2024-01-22
|
||||
integrity = "sha256-b7ZTT+olF+VXEJYNTV5jggNtCkt9dOejm1i2VE+zy+0=",
|
||||
strip_prefix = "holesky-874c199423ccd180607320c38cbaca05d9a1573a",
|
||||
url = "https://github.com/eth-clients/holesky/archive/874c199423ccd180607320c38cbaca05d9a1573a.tar.gz", # 2024-06-18
|
||||
)
|
||||
|
||||
http_archive(
|
||||
|
||||
@@ -1,11 +1,24 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"constants.go",
|
||||
"headers.go",
|
||||
"jwt.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//crypto/rand:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["jwt_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
)
|
||||
|
||||
@@ -6,11 +6,14 @@ go_library(
|
||||
"checkpoint.go",
|
||||
"client.go",
|
||||
"doc.go",
|
||||
"health.go",
|
||||
"log.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/beacon/iface:go_default_library",
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -36,10 +39,12 @@ go_test(
|
||||
srcs = [
|
||||
"checkpoint_test.go",
|
||||
"client_test.go",
|
||||
"health_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/beacon/testing:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -53,5 +58,6 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
@@ -74,7 +74,12 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
|
||||
}
|
||||
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"name": vu.Config.ConfigName,
|
||||
"fork": version.String(vu.Fork),
|
||||
}).Info("Detected supported config in remote finalized state")
|
||||
|
||||
s, err := vu.UnmarshalBeaconState(sb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
|
||||
@@ -108,10 +113,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
|
||||
}
|
||||
|
||||
log.
|
||||
WithField("block_slot", b.Block().Slot()).
|
||||
WithField("state_slot", s.Slot()).
|
||||
WithField("state_root", hexutil.Encode(sr[:])).
|
||||
WithField("block_root", hexutil.Encode(br[:])).
|
||||
WithField("blockSlot", b.Block().Slot()).
|
||||
WithField("stateSlot", s.Slot()).
|
||||
WithField("stateRoot", hexutil.Encode(sr[:])).
|
||||
WithField("blockRoot", hexutil.Encode(br[:])).
|
||||
Info("Downloaded checkpoint sync state and block.")
|
||||
return &OriginData{
|
||||
st: s,
|
||||
|
||||
@@ -9,22 +9,20 @@ import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
blocktest "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
type testRT struct {
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -309,9 +309,9 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
|
||||
}
|
||||
for _, failure := range errorJson.Failures {
|
||||
w := request[failure.Index].Message
|
||||
log.WithFields(log.Fields{
|
||||
"validator_index": w.ValidatorIndex,
|
||||
"withdrawal_address": w.ToExecutionAddress,
|
||||
log.WithFields(logrus.Fields{
|
||||
"validatorIndex": w.ValidatorIndex,
|
||||
"withdrawalAddress": w.ToExecutionAddress,
|
||||
}).Error(failure.Message)
|
||||
}
|
||||
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
|
||||
@@ -341,9 +341,9 @@ type forkScheduleResponse struct {
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
|
||||
ofs := make(forks.OrderedSchedule, 0)
|
||||
for _, d := range fsr.Data {
|
||||
epoch, err := strconv.Atoi(d.Epoch)
|
||||
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
|
||||
}
|
||||
vSlice, err := hexutil.Decode(d.CurrentVersion)
|
||||
if err != nil {
|
||||
@@ -355,7 +355,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
|
||||
version := bytesutil.ToBytes4(vSlice)
|
||||
ofs = append(ofs, forks.ForkScheduleEntry{
|
||||
Version: version,
|
||||
Epoch: primitives.Epoch(uint64(epoch)),
|
||||
Epoch: primitives.Epoch(epoch),
|
||||
})
|
||||
}
|
||||
sort.Sort(ofs)
|
||||
|
||||
60
api/client/beacon/health.go
Normal file
60
api/client/beacon/health.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
|
||||
)
|
||||
|
||||
type NodeHealthTracker struct {
|
||||
isHealthy *bool
|
||||
healthChan chan bool
|
||||
node iface.HealthNode
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
|
||||
return &NodeHealthTracker{
|
||||
node: node,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// HealthUpdates provides a read-only channel for health updates.
|
||||
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
|
||||
return n.healthChan
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) IsHealthy() bool {
|
||||
n.RLock()
|
||||
defer n.RUnlock()
|
||||
if n.isHealthy == nil {
|
||||
return false
|
||||
}
|
||||
return *n.isHealthy
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
newStatus := n.node.IsHealthy(ctx)
|
||||
if n.isHealthy == nil {
|
||||
n.isHealthy = &newStatus
|
||||
}
|
||||
|
||||
isStatusChanged := newStatus != *n.isHealthy
|
||||
if isStatusChanged {
|
||||
// Update the health status
|
||||
n.isHealthy = &newStatus
|
||||
// Send the new status to the health channel, potentially overwriting the existing value
|
||||
select {
|
||||
case <-n.healthChan:
|
||||
n.healthChan <- newStatus
|
||||
default:
|
||||
n.healthChan <- newStatus
|
||||
}
|
||||
}
|
||||
return newStatus
|
||||
}
|
||||
112
api/client/beacon/health_test.go
Normal file
112
api/client/beacon/health_test.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestNodeHealth_IsHealthy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
isHealthy bool
|
||||
want bool
|
||||
}{
|
||||
{"initially healthy", true, true},
|
||||
{"initially unhealthy", false, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.isHealthy,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
if got := n.IsHealthy(); got != tt.want {
|
||||
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
initial bool // Initial health status
|
||||
newStatus bool // Status to update to
|
||||
shouldSend bool // Should a message be sent through the channel
|
||||
}{
|
||||
{"healthy to unhealthy", true, false, true},
|
||||
{"unhealthy to healthy", false, true, true},
|
||||
{"remain healthy", true, true, false},
|
||||
{"remain unhealthy", false, false, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := healthTesting.NewMockHealthClient(ctrl)
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.initial,
|
||||
node: client,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
|
||||
s := n.CheckHealth(context.Background())
|
||||
// Check if health status was updated
|
||||
if s != tt.newStatus {
|
||||
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
|
||||
}
|
||||
|
||||
select {
|
||||
case status := <-n.HealthUpdates():
|
||||
if !tt.shouldSend {
|
||||
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
|
||||
} else if status != tt.newStatus {
|
||||
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
|
||||
}
|
||||
default:
|
||||
if tt.shouldSend {
|
||||
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeHealth_Concurrency(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := healthTesting.NewMockHealthClient(ctrl)
|
||||
n := NewNodeHealthTracker(client)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Number of goroutines to spawn for both reading and writing
|
||||
numGoroutines := 6
|
||||
|
||||
wg.Add(numGoroutines * 2) // for readers and writers
|
||||
|
||||
// Concurrently update health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
|
||||
n.CheckHealth(context.Background())
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
|
||||
n.CheckHealth(context.Background())
|
||||
}()
|
||||
}
|
||||
|
||||
// Concurrently read health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = n.IsHealthy() // Just read the value
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait() // Wait for all goroutines to finish
|
||||
}
|
||||
8
api/client/beacon/iface/BUILD.bazel
Normal file
8
api/client/beacon/iface/BUILD.bazel
Normal file
@@ -0,0 +1,8 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["health.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
13
api/client/beacon/iface/health.go
Normal file
13
api/client/beacon/iface/health.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package iface
|
||||
|
||||
import "context"
|
||||
|
||||
type HealthTracker interface {
|
||||
HealthUpdates() <-chan bool
|
||||
IsHealthy() bool
|
||||
CheckHealth(ctx context.Context) bool
|
||||
}
|
||||
|
||||
type HealthNode interface {
|
||||
IsHealthy(ctx context.Context) bool
|
||||
}
|
||||
5
api/client/beacon/log.go
Normal file
5
api/client/beacon/log.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package beacon
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "beacon")
|
||||
12
api/client/beacon/testing/BUILD.bazel
Normal file
12
api/client/beacon/testing/BUILD.bazel
Normal file
@@ -0,0 +1,12 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["mock.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/beacon/iface:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
)
|
||||
59
api/client/beacon/testing/mock.go
Normal file
59
api/client/beacon/testing/mock.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = iface.HealthNode(&MockHealthClient{})
|
||||
)
|
||||
|
||||
// MockHealthClient is a mock of HealthClient interface.
|
||||
type MockHealthClient struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockHealthClientMockRecorder
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
|
||||
type MockHealthClientMockRecorder struct {
|
||||
mock *MockHealthClient
|
||||
}
|
||||
|
||||
// IsHealthy mocks base method.
|
||||
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "IsHealthy", arg0)
|
||||
ret0, ok := ret[0].(bool)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return ret0
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// IsHealthy indicates an expected call of IsHealthy.
|
||||
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
|
||||
mr.mock.Lock()
|
||||
defer mr.mock.Unlock()
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
|
||||
}
|
||||
|
||||
// NewMockHealthClient creates a new mock instance.
|
||||
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
|
||||
mock := &MockHealthClient{ctrl: ctrl}
|
||||
mock.recorder = &MockHealthClientMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
@@ -28,6 +29,7 @@ go_library(
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -49,9 +51,11 @@ go_test(
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
@@ -22,7 +23,7 @@ type SignedBid interface {
|
||||
type Bid interface {
|
||||
Header() (interfaces.ExecutionData, error)
|
||||
BlobKzgCommitments() ([][]byte, error)
|
||||
Value() []byte
|
||||
Value() primitives.Wei
|
||||
Pubkey() []byte
|
||||
Version() int
|
||||
IsNil() bool
|
||||
@@ -125,8 +126,8 @@ func (b builderBid) Version() int {
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBid) Value() []byte {
|
||||
return b.p.Value
|
||||
func (b builderBid) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
@@ -165,7 +166,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
|
||||
// Header returns the execution data interface.
|
||||
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
|
||||
// We have to convert big endian to little endian because the value is coming from the execution layer.
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
@@ -179,8 +180,8 @@ func (b builderBidCapella) Version() int {
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBidCapella) Value() []byte {
|
||||
return b.p.Value
|
||||
func (b builderBidCapella) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
@@ -222,8 +223,8 @@ func (b builderBidDeneb) Version() int {
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBidDeneb) Value() []byte {
|
||||
return b.p.Value
|
||||
func (b builderBidDeneb) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
@@ -249,7 +250,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
|
||||
// Header --
|
||||
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
|
||||
// We have to convert big endian to little endian because the value is coming from the execution layer.
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -14,11 +13,11 @@ import (
|
||||
"text/template"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -57,8 +56,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
b := bytes.NewBuffer(nil)
|
||||
if r.Body == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"body-base64": "(nil value)",
|
||||
"url": r.URL.String(),
|
||||
"bodyBase64": "(nil value)",
|
||||
"url": r.URL.String(),
|
||||
}).Info("builder http request")
|
||||
return nil
|
||||
}
|
||||
@@ -74,8 +73,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
}
|
||||
r.Body = io.NopCloser(b)
|
||||
log.WithFields(log.Fields{
|
||||
"body-base64": string(body),
|
||||
"url": r.URL.String(),
|
||||
"bodyBase64": string(body),
|
||||
"url": r.URL.String(),
|
||||
}).Info("builder http request")
|
||||
|
||||
return nil
|
||||
@@ -282,127 +281,68 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
return err
|
||||
}
|
||||
|
||||
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
|
||||
|
||||
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
|
||||
// The response is the full execution payload used to create the blinded block.
|
||||
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
if !sb.IsBlinded() {
|
||||
return nil, nil, errNotBlinded
|
||||
}
|
||||
switch sb.Version() {
|
||||
case version.Bellatrix:
|
||||
psb, err := sb.PbBlindedBellatrixBlock()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(ðpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
|
||||
}
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponse{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
|
||||
return nil, nil, errors.New("not a bellatrix payload")
|
||||
}
|
||||
p, err := ep.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
|
||||
}
|
||||
payload, err := blocks.WrappedExecutionPayload(p)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
|
||||
}
|
||||
return payload, nil, nil
|
||||
case version.Capella:
|
||||
psb, err := sb.PbBlindedCapellaBlock()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(ðpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
|
||||
}
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponseCapella{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(version.Capella) {
|
||||
return nil, nil, errors.New("not a capella payload")
|
||||
}
|
||||
p, err := ep.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
|
||||
}
|
||||
payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
|
||||
}
|
||||
return payload, nil, nil
|
||||
case version.Deneb:
|
||||
psb, err := sb.PbBlindedDenebBlock()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(ðpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
|
||||
}
|
||||
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponseDeneb{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
|
||||
return nil, nil, errors.New("not a deneb payload")
|
||||
}
|
||||
p, blobBundle, err := ep.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
|
||||
}
|
||||
payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
|
||||
}
|
||||
return payload, blobBundle, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
|
||||
// massage the proto struct type data into the api response type.
|
||||
mj, err := structs.SignedBeaconBlockMessageJsoner(sb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error generating blinded beacon block post request")
|
||||
}
|
||||
|
||||
body, err := json.Marshal(mj)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error marshaling blinded block post request to json")
|
||||
}
|
||||
postOpts := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(sb.Version()))
|
||||
r.Header.Set("Content-Type", api.JsonMediaType)
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
|
||||
// blobs bundle if it is post deneb.
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
|
||||
}
|
||||
// ExecutionPayloadResponse parses just the outer container and the Value key, enabling it to use the .Value
|
||||
// key to determine which underlying data type to use to finish the unmarshaling.
|
||||
ep := &ExecutionPayloadResponse{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder ExecutionPayloadResponse")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(sb.Version()) {
|
||||
return nil, nil, errors.Wrapf(errResponseVersionMismatch, "req=%s, recv=%s", strings.ToLower(ep.Version), version.String(sb.Version()))
|
||||
}
|
||||
// This parses the rest of the response and returns the inner data field.
|
||||
pp, err := ep.ParsePayload()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse execution payload from builder with version=%s", ep.Version)
|
||||
}
|
||||
// Get the payload as a proto.Message so it can be wrapped as an execution payload interface.
|
||||
pb, err := pp.PayloadProto()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ed, err := blocks.NewWrappedExecutionData(pb)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
bb, ok := pp.(BlobBundler)
|
||||
if ok {
|
||||
bbpb, err := bb.BundleProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to extract blobs bundle from builder response with version=%s", ep.Version)
|
||||
}
|
||||
return ed, bbpb, nil
|
||||
}
|
||||
return ed, nil, nil
|
||||
}
|
||||
|
||||
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
@@ -198,12 +198,12 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
|
||||
require.Equal(t, uint64(1), bidHeader.GasUsed())
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
// this matches the value in the testExampleHeaderResponse
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -230,12 +230,11 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -262,12 +261,13 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
|
||||
kcgCommitments, err := bid.BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
@@ -321,6 +321,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
||||
@@ -347,6 +349,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
|
||||
@@ -376,6 +380,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
var req structs.SignedBlindedBeaconBlockDeneb
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
require.NoError(t, err)
|
||||
@@ -426,7 +432,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
_, _, err = c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.ErrorContains(t, "not a bellatrix payload", err)
|
||||
require.ErrorIs(t, err, errResponseVersionMismatch)
|
||||
})
|
||||
t.Run("not blinded", func(t *testing.T) {
|
||||
sbb, err := blocks.NewSignedBeaconBlock(ð.SignedBeaconBlockBellatrix{Block: ð.BeaconBlockBellatrix{Body: ð.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})
|
||||
|
||||
@@ -9,11 +9,15 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var errInvalidUint256 = errors.New("invalid Uint256")
|
||||
@@ -44,6 +48,9 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
|
||||
|
||||
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
|
||||
func (s Uint256) SSZBytes() []byte {
|
||||
if s.Int == nil {
|
||||
s.Int = big.NewInt(0)
|
||||
}
|
||||
if !math.IsValidUint256(s.Int) {
|
||||
return []byte{}
|
||||
}
|
||||
@@ -91,6 +98,9 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
|
||||
|
||||
// MarshalText returns a text byte representation of Uint256.
|
||||
func (s Uint256) MarshalText() ([]byte, error) {
|
||||
if s.Int == nil {
|
||||
s.Int = big.NewInt(0)
|
||||
}
|
||||
if !math.IsValidUint256(s.Int) {
|
||||
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
|
||||
}
|
||||
@@ -146,6 +156,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
||||
}
|
||||
return ð.BuilderBid{
|
||||
Header: header,
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bb.Value.SSZBytes(),
|
||||
Pubkey: bb.Pubkey,
|
||||
}, nil
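To make the byte-order note above concrete, here is a small same-package sketch; the helper name exampleBidValueBytes and the wei amount are hypothetical, but stringToUint256 and SSZBytes are the helpers shown in this diff.

// Illustrative sketch of the big-endian vs. little-endian representations of a bid value.
func exampleBidValueBytes() ([]byte, []byte, error) {
	v, err := stringToUint256("1000000000000000000") // hypothetical value: 1 ETH in wei, base-10 text
	if err != nil {
		return nil, nil, err
	}
	be := v.Bytes()    // big-endian bytes, as math/big produces them
	le := v.SSZBytes() // little-endian bytes, as carried in BuilderBid.Value
	return be, le, nil
}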
|
||||
@@ -265,6 +277,11 @@ func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
func (r *ExecutionPayload) PayloadProto() (proto.Message, error) {
|
||||
pb, err := r.ToProto()
|
||||
return pb, err
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayload Proto
|
||||
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
@@ -396,6 +413,51 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
|
||||
|
||||
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
|
||||
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
|
||||
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
|
||||
pb := ed.Proto()
|
||||
var data interface{}
|
||||
var err error
|
||||
var ver string
|
||||
switch pbStruct := pb.(type) {
|
||||
case *v1.ExecutionPayload:
|
||||
ver = version.String(version.Bellatrix)
|
||||
data, err = FromProto(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadCapella:
|
||||
ver = version.String(version.Capella)
|
||||
data, err = FromProtoCapella(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadDeneb:
|
||||
ver = version.String(version.Deneb)
|
||||
payloadStruct, err := FromProtoDeneb(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
|
||||
}
|
||||
data = &ExecutionPayloadDenebAndBlobsBundle{
|
||||
ExecutionPayload: &payloadStruct,
|
||||
BlobsBundle: FromBundleProto(bundle),
|
||||
}
|
||||
default:
|
||||
return nil, errInvalidTypeConversion
|
||||
}
|
||||
encoded, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
|
||||
}
|
||||
return &ExecutionPayloadResponse{
|
||||
Version: ver,
|
||||
Data: encoded,
|
||||
}, nil
|
||||
}
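A builder-side sketch of how this envelope constructor might be used when serving the unblinding response; the handler shape is an assumption rather than part of this change, and it relies on net/http in addition to the encoding/json import already present in this file.

// Hypothetical server-side helper: wrap a payload in the versioned envelope and write it as JSON.
func writePayloadResponse(w http.ResponseWriter, ed interfaces.ExecutionData, bundle *v1.BlobsBundle) error {
	resp, err := ExecutionPayloadResponseFromData(ed, bundle)
	if err != nil {
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	// resp.Data already holds the version-specific JSON, so a plain encode is enough.
	return json.NewEncoder(w).Encode(resp)
}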
|
||||
|
||||
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
|
||||
type ExecHeaderResponseCapella struct {
|
||||
Data struct {
|
||||
@@ -424,6 +486,8 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
|
||||
}
|
||||
return ð.BuilderBidCapella{
|
||||
Header: header,
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
}, nil
|
||||
@@ -523,6 +587,42 @@ type ExecPayloadResponseCapella struct {
|
||||
Data ExecutionPayloadCapella `json:"data"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadResponse allows for unmarshaling just the Version field of the payload.
|
||||
// This allows it to return different ExecutionPayload types based on the version field.
|
||||
type ExecutionPayloadResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
// ParsedPayload can retrieve the underlying protobuf message for the given execution payload response.
|
||||
type ParsedPayload interface {
|
||||
PayloadProto() (proto.Message, error)
|
||||
}
|
||||
|
||||
// BlobBundler can retrieve the underlying blob bundle protobuf message for the given execution payload response.
|
||||
type BlobBundler interface {
|
||||
BundleProto() (*v1.BlobsBundle, error)
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
|
||||
var toProto ParsedPayload
|
||||
switch r.Version {
|
||||
case version.String(version.Bellatrix):
|
||||
toProto = &ExecutionPayload{}
|
||||
case version.String(version.Capella):
|
||||
toProto = &ExecutionPayloadCapella{}
|
||||
case version.String(version.Deneb):
|
||||
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
|
||||
default:
|
||||
return nil, consensusblocks.ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(r.Data, toProto); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal the response .Data field with the stated version schema")
|
||||
}
|
||||
return toProto, nil
|
||||
}
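For orientation, a consumer-side sketch of the flow above (the helper name payloadFromResponseBody is hypothetical): unmarshal the envelope, let ParsePayload pick the concrete struct for the stated version, then pull out the proto payload and, for Deneb, the blobs bundle.

func payloadFromResponseBody(body []byte) (proto.Message, *v1.BlobsBundle, error) {
	ep := &ExecutionPayloadResponse{}
	if err := json.Unmarshal(body, ep); err != nil {
		return nil, nil, err
	}
	parsed, err := ep.ParsePayload()
	if err != nil {
		return nil, nil, err
	}
	pb, err := parsed.PayloadProto()
	if err != nil {
		return nil, nil, err
	}
	var bundle *v1.BlobsBundle
	if bundler, ok := parsed.(BlobBundler); ok {
		// Only the Deneb combined type implements BlobBundler.
		bundle, err = bundler.BundleProto()
		if err != nil {
			return nil, nil, err
		}
	}
	return pb, bundle, nil
}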
|
||||
|
||||
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
|
||||
type ExecutionPayloadCapella struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||
@@ -547,6 +647,11 @@ func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, err
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
func (p *ExecutionPayloadCapella) PayloadProto() (proto.Message, error) {
|
||||
pb, err := p.ToProto()
|
||||
return pb, err
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayloadCapella Proto.
|
||||
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
@@ -921,8 +1026,10 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
|
||||
return ð.BuilderBidDeneb{
|
||||
Header: header,
|
||||
BlobKzgCommitments: kzgCommitments,
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -1128,6 +1235,12 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
|
||||
if r.Data == nil {
|
||||
return nil, nil, errors.New("data field in response is empty")
|
||||
}
|
||||
if r.Data.ExecutionPayload == nil {
|
||||
return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
|
||||
}
|
||||
if r.Data.BlobsBundle == nil {
|
||||
return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
|
||||
}
|
||||
payload, err := r.Data.ExecutionPayload.ToProto()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -1139,8 +1252,26 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
|
||||
return payload, bundle, nil
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadDenebAndBlobsBundle) PayloadProto() (proto.Message, error) {
|
||||
if r.ExecutionPayload == nil {
|
||||
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload in combined deneb payload")
|
||||
}
|
||||
pb, err := r.ExecutionPayload.ToProto()
|
||||
return pb, err
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadDenebAndBlobsBundle) BundleProto() (*v1.BlobsBundle, error) {
|
||||
if r.BlobsBundle == nil {
|
||||
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
|
||||
}
|
||||
return r.BlobsBundle.ToProto()
|
||||
}
|
||||
|
||||
// ToProto returns the ExecutionPayloadDeneb Proto.
|
||||
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
|
||||
if p == nil {
|
||||
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
|
||||
}
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
for i := range p.Transactions {
|
||||
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
|
||||
|
||||
@@ -12,12 +12,15 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
@@ -1600,7 +1603,6 @@ func TestBuilderBidUnmarshalUint256(t *testing.T) {
|
||||
require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
|
||||
r := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testBuilderBid), r))
|
||||
//require.Equal(t, expectedValue, r.Data.Message.Value)
|
||||
marshaled := r.Data.Message.Value.String()
|
||||
require.Equal(t, base10, marshaled)
|
||||
require.Equal(t, 0, expectedValue.Cmp(r.Data.Message.Value.Int))
|
||||
@@ -1907,3 +1909,41 @@ func TestErrorMessage_non200Err(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyResponseBody(t *testing.T) {
|
||||
t.Run("empty buffer", func(t *testing.T) {
|
||||
var b []byte
|
||||
r := &ExecutionPayloadResponse{}
|
||||
err := json.Unmarshal(b, r)
|
||||
var syntaxError *json.SyntaxError
|
||||
ok := errors.As(err, &syntaxError)
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
t.Run("empty object", func(t *testing.T) {
|
||||
empty := []byte("{}")
|
||||
emptyResponse := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(empty, emptyResponse))
|
||||
_, err := emptyResponse.ParsePayload()
|
||||
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
|
||||
})
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
|
||||
for i := range versions {
|
||||
vstr := version.String(versions[i])
|
||||
t.Run("populated version without payload"+vstr, func(t *testing.T) {
|
||||
in := &ExecutionPayloadResponse{Version: vstr}
|
||||
encoded, err := json.Marshal(in)
|
||||
require.NoError(t, err)
|
||||
epr := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(encoded, epr))
|
||||
pp, err := epr.ParsePayload()
|
||||
require.NoError(t, err)
|
||||
pb, err := pp.PayloadProto()
|
||||
if err == nil {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, pb == nil)
|
||||
} else {
|
||||
require.ErrorIs(t, err, consensusblocks.ErrNilObject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,6 +21,9 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
|
||||
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
|
||||
var ErrInvalidNodeVersion = errors.New("invalid node version response")
|
||||
|
||||
// ErrConnectionIssue represents a connection problem.
|
||||
var ErrConnectionIssue = errors.New("could not connect")
|
||||
|
||||
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
|
||||
func Non200Err(response *http.Response) error {
|
||||
bodyBytes, err := io.ReadAll(response.Body)
|
||||
|
||||
api/client/event/BUILD.bazel (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["event_stream.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/client:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["event_stream_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
api/client/event/event_stream.go (new file, 148 lines)
@@ -0,0 +1,148 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
EventHead = "head"
|
||||
EventBlock = "block"
|
||||
EventAttestation = "attestation"
|
||||
EventVoluntaryExit = "voluntary_exit"
|
||||
EventBlsToExecutionChange = "bls_to_execution_change"
|
||||
EventProposerSlashing = "proposer_slashing"
|
||||
EventAttesterSlashing = "attester_slashing"
|
||||
EventFinalizedCheckpoint = "finalized_checkpoint"
|
||||
EventChainReorg = "chain_reorg"
|
||||
EventContributionAndProof = "contribution_and_proof"
|
||||
EventLightClientFinalityUpdate = "light_client_finality_update"
|
||||
EventLightClientOptimisticUpdate = "light_client_optimistic_update"
|
||||
EventPayloadAttributes = "payload_attributes"
|
||||
EventBlobSidecar = "blob_sidecar"
|
||||
EventError = "error"
|
||||
EventConnectionError = "connection_error"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = EventStreamClient(&EventStream{})
|
||||
)
|
||||
|
||||
var DefaultEventTopics = []string{EventHead}
|
||||
|
||||
type EventStreamClient interface {
|
||||
Subscribe(eventsChannel chan<- *Event)
|
||||
}
|
||||
|
||||
type Event struct {
|
||||
EventType string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// EventStream is responsible for subscribing to the Beacon API events endpoint
|
||||
// and dispatching received events to subscribers.
|
||||
type EventStream struct {
|
||||
ctx context.Context
|
||||
httpClient *http.Client
|
||||
host string
|
||||
topics []string
|
||||
}
|
||||
|
||||
func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
|
||||
// Check if the host is a valid URL
|
||||
_, err := url.ParseRequestURI(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(topics) == 0 {
|
||||
return nil, errors.New("no topics provided")
|
||||
}
|
||||
|
||||
return &EventStream{
|
||||
ctx: ctx,
|
||||
httpClient: httpClient,
|
||||
host: host,
|
||||
topics: topics,
|
||||
}, nil
|
||||
}
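A minimal usage sketch for the constructor above, assuming a beacon node serving the standard events endpoint on localhost; the URL and helper name are illustrative only, and the imports it needs are already in this file.

// Hypothetical caller: stream head events until the context is canceled.
func exampleStreamHeads(ctx context.Context) {
	stream, err := NewEventStream(ctx, http.DefaultClient, "http://127.0.0.1:3500", []string{EventHead})
	if err != nil {
		log.WithError(err).Error("could not create event stream")
		return
	}
	ch := make(chan *Event, 1)
	go stream.Subscribe(ch)
	// Subscribe closes the channel when ctx is canceled; connection problems arrive
	// on the same channel with EventType set to EventConnectionError.
	for ev := range ch {
		log.WithField("type", ev.EventType).Info(string(ev.Data))
	}
}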
|
||||
|
||||
func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
|
||||
allTopics := strings.Join(h.topics, ",")
|
||||
log.WithField("topics", allTopics).Info("Listening to Beacon API events")
|
||||
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
|
||||
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
|
||||
if err != nil {
|
||||
eventsChannel <- &Event{
|
||||
EventType: EventConnectionError,
|
||||
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
|
||||
}
|
||||
}
|
||||
req.Header.Set("Accept", api.EventStreamMediaType)
|
||||
req.Header.Set("Connection", api.KeepAlive)
|
||||
resp, err := h.httpClient.Do(req)
|
||||
if err != nil {
|
||||
eventsChannel <- &Event{
|
||||
EventType: EventConnectionError,
|
||||
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if closeErr := resp.Body.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).Error("Failed to close events response body")
|
||||
}
|
||||
}()
|
||||
// Create a new scanner to read lines from the response body
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
|
||||
var eventType, data string // Variables to store event type and data
|
||||
|
||||
// Iterate over lines of the event stream
|
||||
for scanner.Scan() {
|
||||
select {
|
||||
case <-h.ctx.Done():
|
||||
log.Info("Context canceled, stopping event stream")
|
||||
close(eventsChannel)
|
||||
return
|
||||
default:
|
||||
line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
|
||||
// Handle the line according to the server-sent events format
|
||||
if line == "" {
|
||||
// Empty line indicates the end of an event
|
||||
if eventType != "" && data != "" {
|
||||
// Process the event when both eventType and data are set
|
||||
eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
|
||||
}
|
||||
|
||||
// Reset eventType and data for the next event
|
||||
eventType, data = "", ""
|
||||
continue
|
||||
}
|
||||
et, ok := strings.CutPrefix(line, "event: ")
|
||||
if ok {
|
||||
// Extract event type from the "event" field
|
||||
eventType = et
|
||||
}
|
||||
d, ok := strings.CutPrefix(line, "data: ")
|
||||
if ok {
|
||||
// Extract data from the "data" field
|
||||
data = d
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
eventsChannel <- &Event{
|
||||
EventType: EventConnectionError,
|
||||
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
|
||||
}
|
||||
}
|
||||
}
|
||||
api/client/event/event_stream_test.go (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestNewEventStream(t *testing.T) {
|
||||
validURL := "http://localhost:8080"
|
||||
invalidURL := "://invalid"
|
||||
topics := []string{"topic1", "topic2"}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
host string
|
||||
topics []string
|
||||
wantErr bool
|
||||
}{
|
||||
{"Valid input", validURL, topics, false},
|
||||
{"Invalid URL", invalidURL, topics, true},
|
||||
{"No topics", validURL, []string{}, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventStream(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
|
||||
flusher, ok := w.(http.Flusher)
|
||||
require.Equal(t, true, ok)
|
||||
for i := 1; i <= 2; i++ {
|
||||
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
|
||||
require.NoError(t, err)
|
||||
flusher.Flush() // Trigger flush to simulate streaming data
|
||||
time.Sleep(100 * time.Millisecond) // Simulate delay between events
|
||||
}
|
||||
})
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
topics := []string{"head"}
|
||||
eventsChannel := make(chan *Event, 1)
|
||||
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
|
||||
require.NoError(t, err)
|
||||
go stream.Subscribe(eventsChannel)
|
||||
|
||||
// Collect events
|
||||
var events []*Event
|
||||
|
||||
for len(events) != 2 {
|
||||
select {
|
||||
case event := <-eventsChannel:
|
||||
log.Info(event)
|
||||
events = append(events, event)
|
||||
}
|
||||
}
|
||||
|
||||
// Assertions to verify the events content
|
||||
expectedData := []string{"data1", "data2"}
|
||||
for i, event := range events {
|
||||
if string(event.Data) != expectedData[i] {
|
||||
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,4 +4,6 @@ const (
|
||||
WebUrlPrefix = "/v2/validator/"
|
||||
WebApiUrlPrefix = "/api/v2/validator/"
|
||||
KeymanagerApiPrefix = "/eth/v1"
|
||||
SystemLogsPrefix = "health/logs"
|
||||
AuthTokenFileName = "auth-token"
|
||||
)
|
||||
|
||||
@@ -14,7 +14,7 @@ go_library(
|
||||
"//validator:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/middleware:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
@@ -104,7 +104,7 @@ func (g *Gateway) Start() {
|
||||
}
|
||||
}
|
||||
|
||||
corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
|
||||
corsMux := middleware.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
|
||||
|
||||
if g.cfg.muxHandler != nil {
|
||||
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
api/jwt.go (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
)
|
||||
|
||||
// GenerateRandomHexString generates a random hex string that follows the JWT secret standard,
|
||||
// used for beacon node -> execution client
|
||||
// used for web client -> validator client
|
||||
func GenerateRandomHexString() (string, error) {
|
||||
secret := make([]byte, 32)
|
||||
randGen := rand.NewGenerator()
|
||||
n, err := randGen.Read(secret)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if n != 32 {
|
||||
return "", errors.New("rand: unexpected length")
|
||||
}
|
||||
return hexutil.Encode(secret), nil
|
||||
}
|
||||
|
||||
// ValidateAuthToken validates the auth token used for the web UI.
|
||||
func ValidateAuthToken(token string) error {
|
||||
b, err := hexutil.Decode(token)
|
||||
// token should be hex-encoded and at least 256 bits
|
||||
if err != nil || len(b) < 32 {
|
||||
return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
|
||||
}
|
||||
return nil
|
||||
}
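A short usage sketch combining the two helpers above; persisting the token to disk, the file permissions, and the helper name are illustrative assumptions (it also needs the os package, which this file does not yet import).

func writeAuthTokenFile(path string) error {
	token, err := GenerateRandomHexString()
	if err != nil {
		return err
	}
	// A freshly generated token must always pass validation.
	if err := ValidateAuthToken(token); err != nil {
		return err
	}
	return os.WriteFile(path, []byte(token), 0600)
}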
|
||||
api/jwt_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestGenerateRandomHexString(t *testing.T) {
|
||||
token, err := GenerateRandomHexString()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ValidateAuthToken(token))
|
||||
}
|
||||
@@ -2,29 +2,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"error.go",
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
srcs = ["error.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/server",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_rs_cors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"error_test.go",
|
||||
"middleware_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
srcs = ["error_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
deps = ["//testing/assert:go_default_library"],
|
||||
)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
@@ -15,7 +16,8 @@ type DecodeError struct {
|
||||
// NewDecodeError wraps an error (either the initial decoding error or another DecodeError).
|
||||
// The current field that failed decoding must be passed in.
|
||||
func NewDecodeError(err error, field string) *DecodeError {
|
||||
de, ok := err.(*DecodeError)
|
||||
var de *DecodeError
|
||||
ok := errors.As(err, &de)
|
||||
if ok {
|
||||
return &DecodeError{path: append([]string{field}, de.path...), err: de.err}
|
||||
}
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
NormalizeQueryValues(query)
|
||||
r.URL.RawQuery = query.Encode()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// CorsHandler sets the cors settings on api endpoints
|
||||
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: allowOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
})
|
||||
|
||||
return c.Handler
|
||||
}
|
||||
api/server/middleware/BUILD.bazel (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_rs_cors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"middleware_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
api/server/middleware/middleware.go (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
NormalizeQueryValues(query)
|
||||
r.URL.RawQuery = query.Encode()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// CorsHandler sets the CORS settings on API endpoints
|
||||
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: allowOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
})
|
||||
|
||||
return c.Handler
|
||||
}
|
||||
|
||||
// ContentTypeHandler checks the request for an accepted media type, otherwise returning an http.StatusUnsupportedMediaType error
|
||||
func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// skip the GET request
|
||||
if r.Method == http.MethodGet {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
http.Error(w, "Content-Type header is missing", http.StatusUnsupportedMediaType)
|
||||
return
|
||||
}
|
||||
|
||||
accepted := false
|
||||
for _, acceptedType := range acceptedMediaTypes {
|
||||
if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !accepted {
|
||||
http.Error(w, fmt.Sprintf("Unsupported media type: %s", contentType), http.StatusUnsupportedMediaType)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// AcceptHeaderHandler checks whether the client's preferred response media type (Accept header) can be served
|
||||
func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
// header is optional and should skip if not provided
|
||||
if acceptHeader == "" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
accepted := false
|
||||
acceptTypes := strings.Split(acceptHeader, ",")
|
||||
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
|
||||
for _, acceptType := range acceptTypes {
|
||||
acceptType = strings.TrimSpace(acceptType)
|
||||
if acceptType == "*/*" {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
for _, serverAcceptedType := range serverAcceptedTypes {
|
||||
if strings.HasPrefix(acceptType, serverAcceptedType) {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
|
||||
accepted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if accepted {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !accepted {
|
||||
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
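For orientation, a sketch of how these handlers might be chained on a gorilla/mux router; the route, allowed origin, and media types below are illustrative assumptions, not taken from this change.

package main

import (
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
)

func main() {
	r := mux.NewRouter()
	// Middleware runs in registration order for every matched route.
	r.Use(middleware.NormalizeQueryValuesHandler)
	r.Use(middleware.CorsHandler([]string{"http://localhost:4200"}))
	r.Use(middleware.ContentTypeHandler([]string{"application/json"}))
	r.Use(middleware.AcceptHeaderHandler([]string{"application/json"}))
	r.HandleFunc("/eth/v1/example", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	_ = http.ListenAndServe(":8080", r)
}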
|
||||
api/server/middleware/middleware_test.go (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := NormalizeQueryValuesHandler(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputQuery string
|
||||
expectedQuery string
|
||||
}{
|
||||
{
|
||||
name: "3 values",
|
||||
inputQuery: "key=value1,value2,value3",
|
||||
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
if req.URL.RawQuery != test.expectedQuery {
|
||||
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
|
||||
}
|
||||
|
||||
if rr.Body.String() != "next handler" {
|
||||
t.Errorf("next handler was not executed")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestContentTypeHandler(t *testing.T) {
|
||||
acceptedMediaTypes := []string{api.JsonMediaType, api.OctetStreamMediaType}
|
||||
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := ContentTypeHandler(acceptedMediaTypes)(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
contentType string
|
||||
expectedStatusCode int
|
||||
isGet bool
|
||||
}{
|
||||
{
|
||||
name: "Accepted Content-Type - application/json",
|
||||
contentType: api.JsonMediaType,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Accepted Content-Type - ssz format",
|
||||
contentType: api.OctetStreamMediaType,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Unsupported Content-Type - text/plain",
|
||||
contentType: "text/plain",
|
||||
expectedStatusCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "Missing Content-Type",
|
||||
contentType: "",
|
||||
expectedStatusCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "GET request skips content type check",
|
||||
contentType: "",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
isGet: true,
|
||||
},
|
||||
{
|
||||
name: "Content type contains charset is ok",
|
||||
contentType: "application/json; charset=utf-8",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
httpMethod := http.MethodPost
|
||||
if tt.isGet {
|
||||
httpMethod = http.MethodGet
|
||||
}
|
||||
req := httptest.NewRequest(httpMethod, "/", nil)
|
||||
if tt.contentType != "" {
|
||||
req.Header.Set("Content-Type", tt.contentType)
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if status := rr.Code; status != tt.expectedStatusCode {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAcceptHeaderHandler(t *testing.T) {
|
||||
acceptedTypes := []string{"application/json", "application/octet-stream"}
|
||||
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := AcceptHeaderHandler(acceptedTypes)(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
acceptHeader string
|
||||
expectedStatusCode int
|
||||
}{
|
||||
{
|
||||
name: "Accepted Accept-Type - application/json",
|
||||
acceptHeader: "application/json",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Accepted Accept-Type - application/octet-stream",
|
||||
acceptHeader: "application/octet-stream",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Accepted Accept-Type with parameters",
|
||||
acceptHeader: "application/json;q=0.9, application/octet-stream;q=0.8",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "Unsupported Accept-Type - text/plain",
|
||||
acceptHeader: "text/plain",
|
||||
expectedStatusCode: http.StatusNotAcceptable,
|
||||
},
|
||||
{
|
||||
name: "Missing Accept header",
|
||||
acceptHeader: "",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "*/* is accepted",
|
||||
acceptHeader: "*/*",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "application/* is accepted",
|
||||
acceptHeader: "application/*",
|
||||
expectedStatusCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
name: "/* is unsupported",
|
||||
acceptHeader: "/*",
|
||||
expectedStatusCode: http.StatusNotAcceptable,
|
||||
},
|
||||
{
|
||||
name: "application/ is unsupported",
|
||||
acceptHeader: "application/",
|
||||
expectedStatusCode: http.StatusNotAcceptable,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/", nil)
|
||||
if tt.acceptHeader != "" {
|
||||
req.Header.Set("Accept", tt.acceptHeader)
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if status := rr.Code; status != tt.expectedStatusCode {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package server
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
@@ -1,4 +1,4 @@
|
||||
package server
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@@ -1,54 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := NormalizeQueryValuesHandler(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputQuery string
|
||||
expectedQuery string
|
||||
}{
|
||||
{
|
||||
name: "3 values",
|
||||
inputQuery: "key=value1,value2,value3",
|
||||
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
if req.URL.RawQuery != test.expectedQuery {
|
||||
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
|
||||
}
|
||||
|
||||
if rr.Body.String() != "next handler" {
|
||||
t.Errorf("next handler was not executed")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"//api/server:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
|
||||
@@ -1,10 +1,34 @@
|
||||
package structs
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// MessageJsoner describes a signed consensus type wrapper that can return the `.Message` field in a json envelope
|
||||
// encoded as a []byte, for use as a json.RawMessage value when encoding the outer envelope.
|
||||
type MessageJsoner interface {
|
||||
MessageRawJson() ([]byte, error)
|
||||
}
|
||||
|
||||
// SignedMessageJsoner embeds MessageJsoner and adds a method to also retrieve the Signature field as a string.
|
||||
type SignedMessageJsoner interface {
|
||||
MessageJsoner
|
||||
SigString() string
|
||||
}
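A sketch of the generic envelope these interfaces enable (the helper name marshalSignedEnvelope is hypothetical): any signed block struct in this file can be re-serialized without the caller knowing its concrete fork type, using only the encoding/json import already present here.

func marshalSignedEnvelope(s SignedMessageJsoner) ([]byte, error) {
	msg, err := s.MessageRawJson()
	if err != nil {
		return nil, err
	}
	// Re-wrap the raw message and signature in the common outer shape.
	return json.Marshal(struct {
		Message   json.RawMessage `json:"message"`
		Signature string          `json:"signature"`
	}{Message: msg, Signature: s.SigString()})
}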
|
||||
|
||||
type SignedBeaconBlock struct {
|
||||
Message *BeaconBlock `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlock{}
|
||||
|
||||
func (s *SignedBeaconBlock) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlock) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlock struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -29,6 +53,16 @@ type SignedBeaconBlockAltair struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockAltair{}
|
||||
|
||||
func (s *SignedBeaconBlockAltair) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockAltair) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockAltair struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -54,6 +88,16 @@ type SignedBeaconBlockBellatrix struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockBellatrix{}
|
||||
|
||||
func (s *SignedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockBellatrix) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockBellatrix struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -80,6 +124,16 @@ type SignedBlindedBeaconBlockBellatrix struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockBellatrix{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockBellatrix) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -106,6 +160,16 @@ type SignedBeaconBlockCapella struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockCapella{}
|
||||
|
||||
func (s *SignedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockCapella) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockCapella struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -133,6 +197,16 @@ type SignedBlindedBeaconBlockCapella struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockCapella{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockCapella) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockCapella struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -172,6 +246,16 @@ type SignedBeaconBlockDeneb struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockDeneb{}
|
||||
|
||||
func (s *SignedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockDeneb) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockDeneb struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -208,6 +292,16 @@ type SignedBlindedBeaconBlockDeneb struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockDeneb{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockDeneb) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyDeneb struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
|
||||
@@ -8,11 +8,10 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
|
||||
@@ -6,8 +6,10 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -15,6 +17,8 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var ErrUnsupportedConversion = errors.New("Could not determine api struct type to use for value")
|
||||
|
||||
func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, error) {
|
||||
msg, err := h.Message.ToConsensus()
|
||||
if err != nil {
|
||||
@@ -1852,7 +1856,34 @@ func BeaconBlockFromConsensus(b *eth.BeaconBlock) *BeaconBlock {
|
||||
}
|
||||
}
|
||||
|
||||
func SignedBeaconBlockFromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
|
||||
func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock) (SignedMessageJsoner, error) {
|
||||
pb, err := block.Proto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch pbStruct := pb.(type) {
|
||||
case *eth.SignedBeaconBlock:
|
||||
return SignedBeaconBlockPhase0FromConsensus(pbStruct), nil
|
||||
case *eth.SignedBeaconBlockAltair:
|
||||
return SignedBeaconBlockAltairFromConsensus(pbStruct), nil
|
||||
case *eth.SignedBlindedBeaconBlockBellatrix:
|
||||
return SignedBlindedBeaconBlockBellatrixFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockBellatrix:
|
||||
return SignedBeaconBlockBellatrixFromConsensus(pbStruct)
|
||||
case *eth.SignedBlindedBeaconBlockCapella:
|
||||
return SignedBlindedBeaconBlockCapellaFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockCapella:
|
||||
return SignedBeaconBlockCapellaFromConsensus(pbStruct)
|
||||
case *eth.SignedBlindedBeaconBlockDeneb:
|
||||
return SignedBlindedBeaconBlockDenebFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockDeneb:
|
||||
return SignedBeaconBlockDenebFromConsensus(pbStruct)
|
||||
default:
|
||||
return nil, ErrUnsupportedConversion
|
||||
}
|
||||
}
|
||||
|
||||
func SignedBeaconBlockPhase0FromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
|
||||
return &SignedBeaconBlock{
|
||||
Message: BeaconBlockFromConsensus(b.Block),
|
||||
Signature: hexutil.Encode(b.Signature),
|
||||
|
||||
@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestToogleMultipleTimes(t *testing.T) {
|
||||
func TestToggleMultipleTimes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
v := New()
|
||||
@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {
|
||||
|
||||
expected := i%2 != 0
|
||||
if v.IsSet() != expected {
|
||||
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
|
||||
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
|
||||
}
|
||||
|
||||
if pre == v.IsSet() {
|
||||
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
|
||||
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestToogleAfterOverflow(t *testing.T) {
|
||||
func TestToggleAfterOverflow(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var value int32 = math.MaxInt32
|
||||
@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
|
||||
v.Toggle()
|
||||
expected := math.MaxInt32%2 == 0
|
||||
if v.IsSet() != expected {
|
||||
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
|
||||
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
|
||||
}
|
||||
|
||||
// make sure overflow happened
|
||||
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
|
||||
v.Toggle()
|
||||
expected = !expected
|
||||
if v.IsSet() != expected {
|
||||
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
|
||||
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ package event
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
)
|
||||
|
||||
@@ -219,12 +220,9 @@ type caseList []reflect.SelectCase
|
||||
|
||||
// find returns the index of a case containing the given channel.
|
||||
func (cs caseList) find(channel interface{}) int {
|
||||
for i, cas := range cs {
|
||||
if cas.Chan.Interface() == channel {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
|
||||
return selectCase.Chan.Interface() == channel
|
||||
})
|
||||
}
|
||||
|
||||
// delete removes the given case from cs.
|
||||
|
||||
@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
|
||||
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
|
||||
func calculateChunkSize(items int) int {
|
||||
// Start with a simple even split
|
||||
chunkSize := items / runtime.GOMAXPROCS(0)
|
||||
|
||||
bazel.sh (2 changed lines)
@@ -2,7 +2,7 @@
|
||||
|
||||
# This script serves as a wrapper around bazel to limit the scope of environment variables that
|
||||
# may change the action output. Using this script should result in a higher cache hit ratio for
|
||||
# cached actions with a more heremtic build.
|
||||
# cached actions with a more hermetic build.
|
||||
|
||||
env -i \
|
||||
PATH=/usr/bin:/bin \
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"receive_data_column.go",
|
||||
"service.go",
|
||||
"tracked_proposer.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
@@ -48,6 +49,7 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -64,6 +66,7 @@ go_library(
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -137,7 +140,8 @@ go_test(
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -157,6 +161,7 @@ go_test(
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
@@ -181,6 +186,7 @@ go_test(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -6,12 +6,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -21,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ChainInfoFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -399,14 +397,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
|
||||
return s.cfg.ForkChoiceStore.HasNode(root)
|
||||
}
|
||||
|
||||
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
|
||||
// chain known to forkchoice
|
||||
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot takes the root as argument instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
@@ -518,13 +508,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
|
||||
return ar[:], nil
|
||||
}
|
||||
|
||||
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
|
||||
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
|
||||
}
|
||||
|
||||
// SetGenesisTime sets the genesis time of beacon chain.
|
||||
func (s *Service) SetGenesisTime(t time.Time) {
|
||||
s.genesisTime = t
|
||||
@@ -563,3 +546,9 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
|
||||
func (s *Service) inRegularSync() bool {
|
||||
return s.cfg.SyncChecker.Synced()
|
||||
}
|
||||
|
||||
// validating returns true if the beacon is tracking some validators that have
|
||||
// registered for proposing.
|
||||
func (s *Service) validating() bool {
|
||||
return s.cfg.TrackedValidatorsCache.Validating()
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ var (
|
||||
)
|
||||
|
||||
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
|
||||
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
|
||||
|
||||
// An invalid block is the block that fails state transition based on the core protocol rules.
|
||||
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
|
||||
|
||||
@@ -74,8 +74,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
}
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case execution.ErrAcceptedSyncingPayloadStatus:
|
||||
switch {
|
||||
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
|
||||
forkchoiceUpdatedOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"headSlot": headBlk.Slot(),
|
||||
@@ -83,7 +83,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
|
||||
}).Info("Called fork choice updated with optimistic block")
|
||||
return payloadID, nil
|
||||
case execution.ErrInvalidPayloadStatus:
|
||||
case errors.Is(err, execution.ErrInvalidPayloadStatus):
|
||||
forkchoiceUpdatedInvalidNodeCount.Inc()
|
||||
headRoot := arg.headRoot
|
||||
if len(lastValidHash) == 0 {
|
||||
@@ -139,7 +139,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
|
||||
}).Warn("Pruned invalid blocks")
|
||||
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
|
||||
|
||||
default:
|
||||
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
|
||||
return nil, nil
|
||||
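The hunks above replace `switch err { case execution.ErrAcceptedSyncingPayloadStatus: ... }` with `switch { case errors.Is(err, ...): ... }`. The difference matters once errors get wrapped: a `case` on the sentinel value only matches exact equality, while `errors.Is` walks the wrap chain. A self-contained sketch with hypothetical sentinel names standing in for the execution package's errors:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels; the real ones live in the execution package.
var (
	errSyncing = errors.New("accepted/syncing payload status")
	errInvalid = errors.New("invalid payload status")
)

func classify(err error) string {
	switch {
	case err == nil:
		return "valid"
	case errors.Is(err, errSyncing):
		return "optimistic"
	case errors.Is(err, errInvalid):
		return "invalid"
	default:
		return "unknown engine error"
	}
}

func main() {
	wrapped := fmt.Errorf("engine call failed: %w", errSyncing)
	fmt.Println(classify(wrapped))    // "optimistic" -- errors.Is sees through the wrapping
	fmt.Println(classify(errInvalid)) // "invalid"
	fmt.Println(classify(nil))        // "valid"
}
```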
@@ -231,18 +230,18 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
} else {
|
||||
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
|
||||
}
|
||||
switch err {
|
||||
case nil:
|
||||
switch {
|
||||
case err == nil:
|
||||
newPayloadValidNodeCount.Inc()
|
||||
return true, nil
|
||||
case execution.ErrAcceptedSyncingPayloadStatus:
|
||||
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
|
||||
newPayloadOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blk.Block().Slot(),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
return false, nil
|
||||
case execution.ErrInvalidPayloadStatus:
|
||||
case errors.Is(err, execution.ErrInvalidPayloadStatus):
|
||||
lvh := bytesutil.ToBytes32(lastValidHash)
|
||||
return false, invalidBlock{
|
||||
error: ErrInvalidPayload,
|
||||
@@ -256,7 +255,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -324,8 +323,8 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
|
||||
var attr payloadattribute.Attributer
|
||||
switch st.Version() {
|
||||
case version.Deneb:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
case version.Deneb, version.Electra:
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
@@ -342,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
return emptyAttri
|
||||
}
|
||||
case version.Capella:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
|
||||
@@ -855,41 +855,63 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
|
||||
require.Equal(t, 0, len(a))
|
||||
}
|
||||
|
||||
func Test_GetPayloadAttributeDeneb(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
func Test_GetPayloadAttributeV3(t *testing.T) {
|
||||
var testCases = []struct {
|
||||
name string
|
||||
st bstate.BeaconState
|
||||
}{
|
||||
{
|
||||
name: "deneb",
|
||||
st: func() bstate.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
return st
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "electra",
|
||||
st: func() bstate.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
return st
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
a, err := attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
a, err := attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
|
||||
attrV3, err := attr.PbV3()
|
||||
require.NoError(t, err)
|
||||
hr := service.headRoot()
|
||||
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
|
||||
attrV3, err := attr.PbV3()
|
||||
require.NoError(t, err)
|
||||
hr := service.headRoot()
|
||||
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
|
||||
})
|
||||
}
|
||||
}
|
||||
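The rewrite above folds the Deneb-only payload-attribute test into a table-driven test that runs the same assertions for Deneb and Electra states under `t.Run` subtests. A minimal, generic sketch of that pattern (the function under test and the case names below are placeholders, not Prysm's test utilities):

```go
package demo

import "testing"

func double(x int) int { return x * 2 }

func TestDouble(t *testing.T) {
	testCases := []struct {
		name string
		in   int
		want int
	}{
		{name: "deneb-like case", in: 2, want: 4},
		{name: "electra-like case", in: 5, want: 10},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if got := double(tc.in); got != tc.want {
				t.Fatalf("double(%d) = %d, want %d", tc.in, got, tc.want)
			}
		})
	}
}
```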
|
||||
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
|
||||
@@ -401,7 +401,7 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
|
||||
}
|
||||
for _, a := range orphanedBlk.Block().Body().Attestations() {
|
||||
// If the attestation is more than one epoch old, it wouldn't be useful to save it.
|
||||
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
continue
|
||||
}
|
||||
if helpers.IsAggregated(a) {
|
||||
|
||||
@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
|
||||
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
|
||||
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
|
||||
require.NotEqual(t, 0, len(indices))
|
||||
}
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -238,7 +238,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -312,14 +312,14 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []ethpb.Att{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
@@ -389,14 +389,14 @@ func TestSaveOrphanedOps(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []ethpb.Att{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
|
||||
@@ -517,14 +517,14 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []ethpb.Att{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"kzg.go",
|
||||
"trusted_setup.go",
|
||||
"validation.go",
|
||||
],
|
||||
@@ -12,6 +13,9 @@ go_library(
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/blockchain/kzg/kzg.go (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
package kzg
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
)
|
||||
|
||||
// BytesPerBlob is the number of bytes in a single blob.
|
||||
const BytesPerBlob = ckzg4844.BytesPerBlob
|
||||
|
||||
// Blob represents a serialized chunk of data.
|
||||
type Blob [BytesPerBlob]byte
|
||||
|
||||
// BytesPerCell is the number of bytes in a single cell.
|
||||
const BytesPerCell = ckzg4844.BytesPerCell
|
||||
|
||||
// Cell represents a chunk of an encoded Blob.
|
||||
type Cell [BytesPerCell]byte
|
||||
|
||||
// Commitment represent a KZG commitment to a Blob.
|
||||
type Commitment [48]byte
|
||||
|
||||
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
|
||||
type Proof [48]byte
|
||||
|
||||
// Bytes48 is a 48-byte array.
|
||||
type Bytes48 = ckzg4844.Bytes48
|
||||
|
||||
// Bytes32 is a 32-byte array.
|
||||
type Bytes32 = ckzg4844.Bytes32
|
||||
|
||||
// CellsAndProofs represents the Cells and Proofs corresponding to
|
||||
// a single blob.
|
||||
type CellsAndProofs struct {
|
||||
Cells []Cell
|
||||
Proofs []Proof
|
||||
}
|
||||
|
||||
func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
|
||||
comm, err := kzg4844.BlobToCommitment(kzg4844.Blob(*blob))
|
||||
if err != nil {
|
||||
return Commitment{}, err
|
||||
}
|
||||
return Commitment(comm), nil
|
||||
}
|
||||
|
||||
func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
|
||||
proof, err := kzg4844.ComputeBlobProof(kzg4844.Blob(*blob), kzg4844.Commitment(commitment))
|
||||
if err != nil {
|
||||
return [48]byte{}, err
|
||||
}
|
||||
return Proof(proof), nil
|
||||
}
|
||||
|
||||
func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
|
||||
ckzgBlob := (*ckzg4844.Blob)(blob)
|
||||
ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
|
||||
if err != nil {
|
||||
return CellsAndProofs{}, err
|
||||
}
|
||||
|
||||
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
|
||||
}
|
||||
|
||||
func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
|
||||
// Convert `Cell` type to `ckzg4844.Cell`
|
||||
ckzgCells := make([]ckzg4844.Cell, len(cells))
|
||||
for i := range cells {
|
||||
ckzgCells[i] = ckzg4844.Cell(cells[i])
|
||||
}
|
||||
|
||||
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
|
||||
}
|
||||
|
||||
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
|
||||
// Convert `Cell` type to `ckzg4844.Cell`
|
||||
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
|
||||
for i := range partialCells {
|
||||
ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
|
||||
}
|
||||
|
||||
ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
|
||||
if err != nil {
|
||||
return CellsAndProofs{}, err
|
||||
}
|
||||
|
||||
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
|
||||
}
|
||||
|
||||
// Convert cells/proofs to the CellsAndProofs type defined in this package.
|
||||
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
|
||||
if len(ckzgCells) != len(ckzgProofs) {
|
||||
return CellsAndProofs{}, errors.New("different number of cells/proofs")
|
||||
}
|
||||
|
||||
var cells []Cell
|
||||
var proofs []Proof
|
||||
for i := range ckzgCells {
|
||||
cells = append(cells, Cell(ckzgCells[i]))
|
||||
proofs = append(proofs, Proof(ckzgProofs[i]))
|
||||
}
|
||||
|
||||
return CellsAndProofs{
|
||||
Cells: cells,
|
||||
Proofs: proofs,
|
||||
}, nil
|
||||
}
|
||||
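The new kzg.go wraps c-kzg-4844 and go-ethereum's kzg4844 behind package-local types. Assuming the package imports as `kzg` from the repo path shown in the file header and that the trusted setup is loaded via the package's `Start` (see the trusted_setup.go hunk below), a usage sketch of the round trip this file enables: commit to a blob, expand it into cells and proofs, then recover from a subset. Illustrative only, with minimal error handling.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

func main() {
	// Load the embedded trusted setup before any KZG operation.
	if err := kzg.Start(); err != nil {
		log.Fatal(err)
	}

	// A zero blob is a valid input; real callers fill it with field elements.
	var blob kzg.Blob

	commitment, err := kzg.BlobToKZGCommitment(&blob)
	if err != nil {
		log.Fatal(err)
	}
	proof, err := kzg.ComputeBlobKZGProof(&blob, commitment)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("commitment: %x..., blob proof: %x...\n", commitment[:4], proof[:4])

	// Extended representation: one proof per cell.
	cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cells:", len(cellsAndProofs.Cells), "proofs:", len(cellsAndProofs.Proofs))

	// Recover all cells from the first half (any sufficiently large subset works).
	half := len(cellsAndProofs.Cells) / 2
	indices := make([]uint64, 0, half)
	partial := make([]kzg.Cell, 0, half)
	for i := 0; i < half; i++ {
		indices = append(indices, uint64(i))
		partial = append(partial, cellsAndProofs.Cells[i])
	}
	recovered, err := kzg.RecoverCellsAndKZGProofs(indices, partial)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("recovered cells:", len(recovered.Cells))
}
```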
@@ -5,6 +5,8 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -12,17 +14,53 @@ var (
|
||||
//go:embed trusted_setup.json
|
||||
embeddedTrustedSetup []byte // 1.2Mb
|
||||
kzgContext *GoKZG.Context
|
||||
kzgLoaded bool
|
||||
)
|
||||
|
||||
type TrustedSetup struct {
|
||||
G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
|
||||
G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
|
||||
G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
|
||||
}
|
||||
|
||||
func Start() error {
|
||||
parsedSetup := GoKZG.JSONTrustedSetup{}
|
||||
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
|
||||
trustedSetup := &TrustedSetup{}
|
||||
err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse trusted setup JSON")
|
||||
}
|
||||
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
|
||||
kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
|
||||
SetupG2: trustedSetup.G2Monomial[:],
|
||||
SetupG1Lagrange: trustedSetup.G1Lagrange})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize go-kzg context")
|
||||
}
|
||||
|
||||
// Length of a G1 point, converted from hex to binary.
|
||||
g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
|
||||
for i, g1 := range &trustedSetup.G1Monomial {
|
||||
copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||
}
|
||||
// Length of a G1 point, converted from hex to binary.
|
||||
g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
|
||||
for i, g1 := range &trustedSetup.G1Lagrange {
|
||||
copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||
}
|
||||
// Length of a G2 point, converted from hex to binary.
|
||||
g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
|
||||
for i, g2 := range &trustedSetup.G2Monomial {
|
||||
copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
|
||||
}
|
||||
if !kzgLoaded {
|
||||
// TODO: Provide a configuration option for this.
|
||||
var precompute uint = 0
|
||||
|
||||
// Free the current trusted setup before running this method. CKZG
|
||||
// panics if the same setup is run multiple times.
|
||||
if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
kzgLoaded = true
|
||||
return nil
|
||||
}
|
||||
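In the `Start` rewrite above, each G1/G2 point is a 0x-prefixed hex string, so its binary length is `(len(hex)-2)/2`, and the loops flatten all points into one contiguous byte slice using that length as the stride. A minimal sketch of that packing with two made-up 4-byte "points" (real compressed G1 points are 48 bytes, G2 points 96 bytes):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// Stand-ins for compressed points.
	points := []string{"0xdeadbeef", "0x01020304"}

	// Binary length of one point: drop the "0x" prefix, two hex chars per byte.
	stride := (len(points[0]) - 2) / 2

	flat := make([]byte, len(points)*stride)
	for i, p := range points {
		copy(flat[i*stride:], hexutil.MustDecode(p))
	}
	fmt.Printf("%d bytes: %x\n", len(flat), flat) // 8 bytes: deadbeef01020304
}
```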
|
||||
File diff suppressed because it is too large
@@ -69,7 +69,7 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
syncAggregate, err := block.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get sync aggregate %v", err)
|
||||
return nil, fmt.Errorf("could not get sync aggregate %w", err)
|
||||
}
|
||||
|
||||
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
@@ -85,18 +85,18 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
header := state.LatestBlockHeader()
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get state root %v", err)
|
||||
return nil, fmt.Errorf("could not get state root %w", err)
|
||||
}
|
||||
header.StateRoot = stateRoot[:]
|
||||
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get header root %v", err)
|
||||
return nil, fmt.Errorf("could not get header root %w", err)
|
||||
}
|
||||
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block root %v", err)
|
||||
return nil, fmt.Errorf("could not get block root %w", err)
|
||||
}
|
||||
|
||||
if headerRoot != blockRoot {
|
||||
@@ -114,14 +114,14 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested state root %v", err)
|
||||
return nil, fmt.Errorf("could not get attested state root %w", err)
|
||||
}
|
||||
attestedHeader.StateRoot = attestedStateRoot[:]
|
||||
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested header root %v", err)
|
||||
return nil, fmt.Errorf("could not get attested header root %w", err)
|
||||
}
|
||||
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
@@ -175,13 +175,13 @@ func NewLightClientFinalityUpdateFromBeaconState(
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
tempFinalizedHeader, err := finalizedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header %v", err)
|
||||
return nil, fmt.Errorf("could not get finalized header %w", err)
|
||||
}
|
||||
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
|
||||
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header root %v", err)
|
||||
return nil, fmt.Errorf("could not get finalized header root %w", err)
|
||||
}
|
||||
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
@@ -204,7 +204,7 @@ func NewLightClientFinalityUpdateFromBeaconState(
|
||||
var bErr error
|
||||
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
|
||||
if bErr != nil {
|
||||
return nil, fmt.Errorf("could not get finalized root proof %v", bErr)
|
||||
return nil, fmt.Errorf("could not get finalized root proof %w", bErr)
|
||||
}
|
||||
} else {
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
|
||||
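The light-client hunks above change `fmt.Errorf("... %v", err)` to `%w`. With `%v` the cause is flattened into text; with `%w` it stays in the error chain, so callers can still match it with `errors.Is` or unwrap it. A short sketch:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("state root not found")

func main() {
	flattened := fmt.Errorf("could not get state root %v", errNotFound)
	wrapped := fmt.Errorf("could not get state root %w", errNotFound)

	fmt.Println(errors.Is(flattened, errNotFound)) // false: the cause was stringified away
	fmt.Println(errors.Is(wrapped, errNotFound))   // true: %w keeps the error chain intact
}
```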
@@ -82,19 +82,20 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
||||
if level >= logrus.DebugLevel {
|
||||
parentRoot := block.ParentRoot()
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"deposits": len(block.Body().Deposits()),
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
"deposits": len(block.Body().Deposits()),
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
|
||||
@@ -369,6 +369,6 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
|
||||
|
||||
func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
|
||||
for _, att := range blk.Body().Attestations() {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.GetData().Slot))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
func WithP2PBroadcaster(p p2p.Acceser) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
s.cfg.P2P = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -163,7 +163,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p, bytesutil.ToBytes32(parentHash))
|
||||
require.Equal(t, td, totalDifficulty.String())
|
||||
require.Equal(t, td, totalDifficulty.Hex())
|
||||
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
|
||||
require.ErrorContains(t, "could not get pow block: block not found", err)
|
||||
|
||||
@@ -36,17 +36,17 @@ import (
|
||||
//
|
||||
// # Update latest messages for attesting indices
|
||||
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
|
||||
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
|
||||
func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time.Duration) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.GetData()); err != nil {
|
||||
return err
|
||||
}
|
||||
tgt := ethpb.CopyCheckpoint(a.Data.Target)
|
||||
tgt := ethpb.CopyCheckpoint(a.GetData().Target)
|
||||
|
||||
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
@@ -67,7 +67,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
if err := s.verifyBeaconBlock(ctx, a.GetData()); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
@@ -75,16 +75,16 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
|
||||
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the target state to verify attesting indices are valid.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committees...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -97,7 +97,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
// We assume trusted attestation in this function has verified signature.
|
||||
|
||||
// Update forkchoice store with the new attestation for updating weight.
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
|
||||
|
||||
return nil
|
||||
}
|
||||
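`OnAttestation` now accepts the `ethpb.Att` interface and reads fields through accessors such as `GetData()`, so one code path serves pre- and post-Electra attestation types. A toy sketch of the same shape with invented types (not the real ethpb definitions), showing why the accessor is the only thing the shared path needs:

```go
package main

import "fmt"

// attData stands in for the shared attestation data portion.
type attData struct{ Slot uint64 }

// att stands in for an interface like ethpb.Att: callers only rely on GetData.
type att interface {
	GetData() *attData
}

// Two concrete versions with different extra fields, both satisfying att.
type phase0Att struct{ data *attData }

type electraAtt struct {
	data          *attData
	committeeBits []byte
}

func (a *phase0Att) GetData() *attData  { return a.data }
func (a *electraAtt) GetData() *attData { return a.data }

// onAttestation only touches the shared accessor, so it handles both versions.
func onAttestation(a att) {
	fmt.Println("processing attestation for slot", a.GetData().Slot)
}

func main() {
	onAttestation(&phase0Att{data: &attData{Slot: 10}})
	onAttestation(&electraAtt{data: &attData{Slot: 11}, committeeBits: []byte{0x01}})
}
```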
|
||||
@@ -18,17 +18,63 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
|
||||
headEpoch := slots.ToEpoch(s.HeadSlot())
|
||||
if c.Epoch < headEpoch {
|
||||
return nil
|
||||
}
|
||||
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
|
||||
return nil
|
||||
}
|
||||
if c.Epoch == headEpoch {
|
||||
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if slots.ToEpoch(targetSlot)+1 < headEpoch {
|
||||
return nil
|
||||
}
|
||||
st, err := s.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return st
|
||||
}
|
||||
slot, err := slots.EpochStart(c.Epoch)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
// Try if we have already set the checkpoint cache
|
||||
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
|
||||
lock := async.NewMultilock(string(c.Root) + epochKey)
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if cachedState != nil && !cachedState.IsNil() {
|
||||
return cachedState
|
||||
}
|
||||
st, err := s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
|
||||
return nil
|
||||
}
|
||||
return st
|
||||
}
|
||||
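`getRecentPreState` serializes work per checkpoint by locking on a key built from the checkpoint root plus the base-10 epoch (via `async.NewMultilock`), so two goroutines handling the same checkpoint don't both rebuild the state, while different checkpoints proceed in parallel. A minimal sketch of a keyed-lock map using only the standard library (the real Multilock implementation is not shown in this diff):

```go
package main

import (
	"fmt"
	"strconv"
	"sync"
)

// keyedLocks hands out one mutex per string key, mimicking the multilock idea.
type keyedLocks struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newKeyedLocks() *keyedLocks {
	return &keyedLocks{locks: make(map[string]*sync.Mutex)}
}

func (k *keyedLocks) get(key string) *sync.Mutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	if l, ok := k.locks[key]; ok {
		return l
	}
	l := &sync.Mutex{}
	k.locks[key] = l
	return l
}

func main() {
	locks := newKeyedLocks()
	// Same key derivation as the diff: checkpoint root + base-10 epoch.
	key := "0xabc" + strconv.FormatUint(5, 10)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			l := locks.get(key)
			l.Lock()
			defer l.Unlock()
			fmt.Println("goroutine", id, "holds checkpoint", key)
		}(i)
	}
	wg.Wait()
}
```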
|
||||
// getAttPreState retrieves the att pre state by either from the cache or the DB.
|
||||
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
|
||||
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
|
||||
headEpoch := slots.ToEpoch(s.HeadSlot())
|
||||
if c.Epoch == headEpoch {
|
||||
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
|
||||
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
|
||||
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
|
||||
return s.HeadStateReadOnly(ctx)
|
||||
}
|
||||
}
|
||||
if st := s.getRecentPreState(ctx, c); st != nil {
|
||||
return st, nil
|
||||
}
|
||||
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
|
||||
// allowing us to behave smarter in terms of how this function is used concurrently.
|
||||
|
||||
@@ -7,9 +7,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
@@ -73,7 +75,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
a ethpb.Att
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
@@ -125,25 +127,58 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
eval := func(ctx context.Context, service *Service, genesisState state.BeaconState, pks []bls.SecretKey) {
|
||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(att[0].GetData().Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
ojc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
ofc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
|
||||
}
|
||||
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
eval(ctx, service, genesisState, pks)
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
genesisState, pks := util.DeterministicGenesisStateElectra(t, 64)
|
||||
eval(ctx, service, genesisState, pks)
|
||||
})
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := context.Background()
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
ojc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
ofc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
slot: 31,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 1, Root: ckRoot}))
|
||||
}
|
||||
|
||||
func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
|
||||
@@ -6,12 +6,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -31,6 +33,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// A custom slot deadline for processing state slots in our cache.
|
||||
@@ -307,16 +311,16 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
|
||||
return errors.Wrap(err, "could not update proposer index cache")
|
||||
}
|
||||
go func() {
|
||||
go func(ep primitives.Epoch) {
|
||||
// Use a custom deadline here, since this method runs asynchronously.
|
||||
// We ignore the parent method's context and instead create a new one
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(slotCtx, st, ep+1); err != nil {
|
||||
log.WithError(err).Warn("Could not update committee cache")
|
||||
}
|
||||
}()
|
||||
}(e)
|
||||
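The change just above, from `go func() { ... e ... }()` to `go func(ep primitives.Epoch) { ... }(e)`, copies the epoch value at spawn time instead of capturing the surrounding variable. A minimal sketch of the parameter-passing form with a hypothetical epoch loop; a closure that captured the variable directly would, before Go 1.22, share one loop variable across iterations.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for e := uint64(0); e < 3; e++ {
		wg.Add(1)
		// Passing e as an argument pins its value when the goroutine is spawned,
		// mirroring go func(ep primitives.Epoch) { ... }(e) in the hunk above.
		go func(ep uint64) {
			defer wg.Done()
			fmt.Println("updating committee cache for epoch", ep+1)
		}(e)
	}
	wg.Wait()
}
```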
// The latest block header is from the previous epoch
|
||||
r, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -365,17 +369,17 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
|
||||
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
|
||||
// Feed in block's attestations to fork choice store.
|
||||
for _, a := range blk.Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, st, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(a, committees...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
if s.cfg.ForkChoiceStore.HasNode(r) {
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
|
||||
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -386,7 +390,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
|
||||
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
||||
// This function requires a write lock on forkchoice.
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []ethpb.AttSlashing) {
|
||||
for _, slashing := range slashings {
|
||||
indices := blocks.SlashableAttesterIndices(slashing)
|
||||
for _, index := range indices {
|
||||
@@ -499,7 +503,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
}
|
||||
indices, err := bs.Indices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "indices")
|
||||
}
|
||||
missing := make(map[uint64]struct{}, len(expected))
|
||||
for i := range expected {
|
||||
@@ -513,12 +517,35 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
indices, err := bs.ColumnIndices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
missing := make(map[uint64]bool, len(expected))
|
||||
for col := range expected {
|
||||
if !indices[col] {
|
||||
missing[col] = true
|
||||
}
|
||||
}
|
||||
return missing, nil
|
||||
}
|
||||
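`missingDataColumns` above diffs the set of custody columns a node expects against the column indices already on disk and returns only the gap, keeping the same `map[uint64]bool` shape throughout. A standalone sketch of that set difference:

```go
package main

import "fmt"

// missingColumns returns the expected column indices that are not yet stored.
func missingColumns(expected, stored map[uint64]bool) map[uint64]bool {
	missing := make(map[uint64]bool, len(expected))
	for col := range expected {
		if !stored[col] {
			missing[col] = true
		}
	}
	return missing
}

func main() {
	expected := map[uint64]bool{0: true, 7: true, 42: true, 99: true}
	stored := map[uint64]bool{0: true, 42: true}
	fmt.Println(missingColumns(expected, stored)) // map[7:true 99:true]
}
```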
|
||||
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if coreTime.PeerDASIsActive(signed.Block().Slot()) {
|
||||
return s.isDataAvailableDataColumns(ctx, root, signed)
|
||||
}
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
@@ -548,7 +575,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "missing indices")
|
||||
}
|
||||
// If there are no missing indices, all BlobSidecars are available.
|
||||
if len(missing) == 0 {
|
||||
@@ -558,6 +585,25 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
// The gossip handler for blobs writes the index of each verified blob referencing the given
|
||||
// root to the channel returned by blobNotifiers.forRoot.
|
||||
nc := s.blobNotifiers.forRoot(root)
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": len(missing),
|
||||
}).Error("Still waiting for blobs DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case idx := <-nc:
|
||||
@@ -571,7 +617,104 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
s.blobNotifiers.delete(root)
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
|
||||
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get KZG commitments")
|
||||
}
|
||||
// If the block has no commitments, there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Expected is the number of custodied data columns a node is expected to have.
|
||||
expected := len(colMap)
|
||||
if expected == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subscribe to newly stored data columns in the database.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missing, err := missingDataColumns(s.blobStorage, root, colMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": len(missing),
|
||||
}).Error("Still waiting for data columns DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case rootIndex := <-rootIndexChan:
|
||||
if rootIndex.Root != root {
|
||||
// This is not the root we are looking for.
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missing, rootIndex.Index)
|
||||
|
||||
// Exit if there is no more missing data columns.
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
missingIndexes := make([]uint64, 0, len(missing))
|
||||
for val := range missing {
|
||||
copiedVal := val
|
||||
missingIndexes = append(missingIndexes, copiedVal)
|
||||
}
|
||||
return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
|
||||
}
|
||||
}
|
||||
}
|
||||
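The new data-column availability check above subscribes to a feed of (root, index) pairs, deletes each matching index from the missing set, and returns once the set is empty or the context expires. A self-contained sketch of that wait pattern with a plain channel standing in for the storage feed (the names here are illustrative, not the filesystem package's API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type rootIndexPair struct {
	Root  [32]byte
	Index uint64
}

// waitForColumns blocks until every index in missing has been announced for
// root on the feed, or until ctx is done.
func waitForColumns(ctx context.Context, root [32]byte, missing map[uint64]bool, feed <-chan rootIndexPair) error {
	for {
		select {
		case pair := <-feed:
			if pair.Root != root {
				continue // notification for a different block
			}
			delete(missing, pair.Index)
			if len(missing) == 0 {
				return nil
			}
		case <-ctx.Done():
			return fmt.Errorf("still missing %d columns: %w", len(missing), ctx.Err())
		}
	}
}

func main() {
	root := [32]byte{1}
	missing := map[uint64]bool{3: true, 5: true}
	feed := make(chan rootIndexPair, 2)
	feed <- rootIndexPair{Root: root, Index: 3}
	feed <- rootIndexPair{Root: root, Index: 5}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForColumns(ctx, root, missing, feed)) // <nil>
}
```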
|
||||
@@ -7,9 +7,6 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
@@ -25,6 +22,8 @@ import (
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// CurrentSlot returns the current slot based on time.
|
||||
@@ -60,7 +59,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
|
||||
|
||||
// logNonCanonicalBlockReceived prints a message informing that the received
|
||||
// block is not the head of the chain. It requires the caller holds a lock on
|
||||
// Foprkchoice.
|
||||
// Forkchoice.
|
||||
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
|
||||
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
|
||||
if err != nil {
|
||||
|
||||
@@ -824,7 +824,10 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
|
||||
|
||||
atts := b.Block.Body.Attestations
|
||||
atts := make([]ethpb.Att, len(b.Block.Body.Attestations))
|
||||
for i, a := range b.Block.Body.Attestations {
|
||||
atts[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
@@ -1531,6 +1534,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
// 12 and recover. Notice that it takes two epochs to fully recover, and we stay
|
||||
// optimistic for the whole time.
|
||||
func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
t.Skip("Requires #13664 to be fixed")
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SlotsPerEpoch = 6
|
||||
@@ -1959,68 +1963,130 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisStateElectra(t, 64)
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis, err := blocks.NewGenesisBlockForState(ctx, st)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, genesis), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block().HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
// prepare another block that is not inserted
|
||||
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
|
||||
require.NoError(t, err)
|
||||
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
|
||||
@@ -2114,7 +2180,7 @@ func TestMissingIndices(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, c.present))
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
|
||||
missing, err := missingIndices(bs, c.root, c.expected)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
|
||||
@@ -31,7 +31,7 @@ type AttestationStateFetcher interface {
|
||||
// AttestationReceiver interface defines the methods of the chain service for receiving and processing new attestations.
|
||||
type AttestationReceiver interface {
|
||||
AttestationStateFetcher
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att ethpb.Att) error
|
||||
InForkchoice([32]byte) bool
|
||||
}
|
||||
|
||||
@@ -51,13 +51,13 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency verifies that an attestation's LMD and FFG votes are consistent with each other.
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) error {
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(a.Data.Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
|
||||
if !bytes.Equal(a.GetData().Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.GetData().BeaconBlockRoot, a.GetData().Target.Root, r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
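As a hedged usage sketch (the package and helper names are assumptions, not part of the diff), the parameter widened to ethpb.Att lets callers pass any attestation version through the AttestationReceiver interface:

package attutil

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// checkAttConsistency verifies that an attestation's LMD vote (beacon block
// root) and FFG vote (target checkpoint) agree, independent of the concrete
// attestation version behind the ethpb.Att interface.
func checkAttConsistency(ctx context.Context, recv blockchain.AttestationReceiver, att ethpb.Att) error {
	return recv.VerifyLmdFfgConsistency(ctx, att)
}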
@@ -95,7 +95,9 @@ func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
@@ -168,13 +170,13 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
// Based on the spec, don't process the attestation until the subsequent slot.
|
||||
// This delays consideration in the fork choice until their slot is in the past.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
|
||||
nextSlot := a.Data.Slot + 1
|
||||
nextSlot := a.GetData().Slot + 1
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
|
||||
if !(hasState && hasBlock) {
|
||||
continue
|
||||
}
|
||||
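A small sketch of the timing rule cited above (helper name assumed): an attestation for slot S only becomes eligible for fork-choice processing once slot S+1 has started, within the allowed clock disparity.

package blockchain

import (
	"time"

	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// processableAt reports whether an attestation may be considered by fork
// choice yet, per the validate_on_attestation rule referenced above.
func processableAt(genesisUnix uint64, att ethpb.Att, disparity time.Duration) bool {
	nextSlot := att.GetData().Slot + 1
	return slots.VerifyTime(genesisUnix, nextSlot, disparity) == nil
}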
@@ -183,17 +185,17 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
log.WithError(err).Error("Could not delete fork choice attestation in pool")
|
||||
}
|
||||
|
||||
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
|
||||
if !helpers.VerifyCheckpointEpoch(a.GetData().Target, s.genesisTime) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": a.Data.Slot,
|
||||
"committeeIndex": a.Data.CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
|
||||
"aggregationCount": a.AggregationBits.Count(),
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregationCount": a.GetAggregationBits().Count(),
|
||||
}).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
}
|
||||
}
|
||||
@@ -204,7 +206,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
// 1. Validate attestation, update validator's latest vote
|
||||
// 2. Apply fork choice to the processed attestation
|
||||
// 3. Save latest head info
|
||||
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
|
||||
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att ethpb.Att, disparity time.Duration) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
|
||||
tRoot := bytesutil.ToBytes32(atts[0].GetData().Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -83,7 +83,11 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
attsToSave := make([]ethpb.Att, len(atts))
|
||||
for i, a := range atts {
|
||||
attsToSave[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
|
||||
service.processAttestations(ctx, 0)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
|
||||
@@ -121,10 +125,14 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
// Generate attestations for this block in Slot 1
|
||||
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
attsToSave := make([]ethpb.Att, len(atts))
|
||||
for i, a := range atts {
|
||||
attsToSave[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
|
||||
// Verify the target is in forkchoice
|
||||
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
|
||||
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
|
||||
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot)))
|
||||
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot))
|
||||
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
|
||||
|
||||
// Insert a new block to forkchoice
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -32,6 +33,9 @@ import (
|
||||
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
|
||||
var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
|
||||
|
||||
// This defines how many epochs since finality the run time will begin to expand our respective cache sizes.
|
||||
var epochsSinceFinalityExpandCache = primitives.Epoch(4)
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
|
||||
@@ -47,9 +51,15 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// DataColumnReceiver interface defines the methods of chain service for receiving new
|
||||
// data columns
|
||||
type DataColumnReceiver interface {
|
||||
ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
}
|
||||
|
||||
// ReceiveBlock is a function that defines the operations (minus pubsub)
|
||||
@@ -94,6 +104,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
var postState state.BeaconState
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to validate consensus state transition function")
|
||||
@@ -102,6 +113,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
})
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
@@ -164,13 +176,8 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
// Send finalized events and finalized deposits in the background
|
||||
if newFinalized {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
go s.sendNewFinalizedEvent(blockCopy, postState)
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDeposits(depCtx, finalized.Root)
|
||||
cancel()
|
||||
}()
|
||||
// hook to process all post state finalization tasks
|
||||
s.executePostFinalizationTasks(ctx, postState)
|
||||
}
|
||||
|
||||
// If slasher is configured, forward the attestations in the block via an event feed for processing.
|
||||
@@ -188,6 +195,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return err
|
||||
}
|
||||
|
||||
// We apply the same heuristic to some of our more important caches.
|
||||
if err := s.handleCaches(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
finalized := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
@@ -214,6 +226,19 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
go func() {
|
||||
finalizedState.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDeposits(depCtx, finalized.Root)
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
@@ -285,10 +310,10 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []ethpb.AttSlashing{slashing})
|
||||
}
|
||||
|
||||
// prunePostBlockOperationPools only runs on a new head; otherwise it returns nil.
|
||||
@@ -361,6 +386,27 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
|
||||
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
|
||||
}
|
||||
|
||||
func (s *Service) handleCaches() error {
|
||||
currentEpoch := slots.ToEpoch(s.CurrentSlot())
|
||||
// Prevent `sinceFinality` from underflowing.
|
||||
var sinceFinality primitives.Epoch
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
if currentEpoch > finalized.Epoch {
|
||||
sinceFinality = currentEpoch - finalized.Epoch
|
||||
}
|
||||
|
||||
if sinceFinality >= epochsSinceFinalityExpandCache {
|
||||
helpers.ExpandCommitteeCache()
|
||||
return nil
|
||||
}
|
||||
|
||||
helpers.CompressCommitteeCache()
|
||||
return nil
|
||||
}
|
||||
|
||||
// This performs the state transition function and returns the poststate or an
|
||||
// error if the block fails to verify the consensus rules
|
||||
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
|
||||
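A minimal sketch of the finality-distance heuristic handleCaches applies (function name assumed for illustration): the committee cache is expanded only once finality lags by at least the configured number of epochs, with the subtraction guarded against underflow.

package blockchain

import "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

// shouldExpandCommitteeCache reports whether the committee cache should grow,
// mirroring the finality-distance check in handleCaches.
func shouldExpandCommitteeCache(current, finalized, threshold primitives.Epoch) bool {
	var sinceFinality primitives.Epoch
	if current > finalized { // Epoch is unsigned, so guard against wrap-around
		sinceFinality = current - finalized
	}
	return sinceFinality >= threshold
}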
@@ -412,7 +458,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
|
||||
|
||||
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
|
||||
// event feed. It needs to be called in the background
|
||||
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
|
||||
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
|
||||
isValidPayload := false
|
||||
s.headLock.RLock()
|
||||
if s.head != nil {
|
||||
@@ -420,8 +466,17 @@ func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBl
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
|
||||
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
|
||||
return
|
||||
}
|
||||
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
|
||||
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
|
||||
return
|
||||
}
|
||||
stateRoot := blk.Block().StateRoot()
|
||||
// Send an event regarding the new finalized checkpoint over a common event feed.
|
||||
stateRoot := signed.Block().StateRoot()
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.FinalizedCheckpoint,
|
||||
Data: ðpbv1.EventFinalizedCheckpoint{
|
||||
@@ -439,17 +494,17 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
ctx := context.TODO()
|
||||
for _, att := range signed.Block().Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, preState, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committee")
|
||||
log.WithError(err).Error("Could not get attestation committees")
|
||||
return
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to indexed attestation")
|
||||
return
|
||||
}
|
||||
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
|
||||
s.cfg.SlasherAttestationsFeed.Send(&types.WrappedIndexedAtt{IndexedAtt: indexedAtt})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -457,7 +512,10 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
|
||||
if err != nil {
|
||||
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
return false, err
|
||||
}
|
||||
if signed.Version() < version.Capella && isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
|
||||
|
||||
@@ -6,18 +6,24 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -308,6 +314,29 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
func TestHandleCaches_EnablingLargeSize(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s, _ := minimalTestService(t)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
require.NoError(t, s.handleCaches())
|
||||
assert.LogsContain(t, hook, "Expanding committee cache size")
|
||||
}
|
||||
|
||||
func TestHandleCaches_DisablingLargeSize(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s, _ := minimalTestService(t)
|
||||
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
require.NoError(t, s.handleCaches())
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.handleCaches())
|
||||
assert.LogsContain(t, hook, "Reducing committee cache size")
|
||||
}
|
||||
|
||||
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
pool := tr.blsPool
|
||||
@@ -355,3 +384,116 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
require.Equal(t, false, pool.ValidatorExists(idx))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_sendNewFinalizedEvent(t *testing.T) {
|
||||
s, _ := minimalTestService(t)
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
finalizedSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
|
||||
require.NoError(t, err)
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.StateRoot = finalizedStRoot[:]
|
||||
sbb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
sbbRoot, err := sbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetFinalizedCheckpoint(ðpb.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: sbbRoot[:],
|
||||
}))
|
||||
|
||||
s.sendNewFinalizedEvent(s.ctx, st)
|
||||
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, sbbRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
}
|
||||
|
||||
func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EIP6110ValidatorIndexCache: true,
|
||||
})
|
||||
defer resetFn()
|
||||
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
finalizedStRoot, err := headState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*122 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
headBlock.Block.Slot = finalizedSlot
|
||||
headBlock.Block.StateRoot = finalizedStRoot[:]
|
||||
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
hexKey := "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
key, err := hexutil.Decode(hexKey)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetValidators([]*ethpb.Validator{
|
||||
{
|
||||
PublicKey: key,
|
||||
WithdrawalCredentials: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
}))
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetFinalizedCheckpoint(ðpb.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: headRoot[:],
|
||||
}))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
|
||||
// check deposit
|
||||
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
|
||||
}
|
||||
|
||||
16
beacon-chain/blockchain/receive_data_column.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
)
|
||||
|
||||
func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
|
||||
if err := s.blobStorage.SaveDataColumn(ds); err != nil {
|
||||
return errors.Wrap(err, "save data column")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
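A hedged usage sketch for the new hook (caller name assumed): once a sidecar has been verified upstream, any DataColumnReceiver implementation, such as the Service above, simply persists it to blob storage.

package blockchain

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

// persistVerifiedColumn hands an already-verified data column sidecar to a
// DataColumnReceiver; the receiver is responsible for storing it.
func persistVerifiedColumn(ctx context.Context, recv DataColumnReceiver, col blocks.VerifiedRODataColumn) error {
	return recv.ReceiveDataColumn(ctx, col)
}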
@@ -11,8 +11,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
@@ -44,6 +42,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
@@ -82,7 +81,7 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
P2P p2p.Acceser
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
@@ -107,15 +106,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}
|
||||
// TODO: Separate Data Columns from blobs
|
||||
/*
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}*/
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
@@ -129,7 +130,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -143,7 +144,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
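An illustrative sketch of the notifier pattern (function name assumed): one goroutine records arriving indices with notifyIndex while another drains the per-root channel returned by forRoot; because notifyIndex only sends previously unseen indices, the reader observes each index at most once.

package blockchain

// waitForIndices blocks until `want` distinct indices have been announced for
// the given root via notifyIndex, then returns the set that was seen.
func waitForIndices(bn *blobNotifierMap, root [32]byte, want int) map[uint64]bool {
	ch := bn.forRoot(root)
	seen := make(map[uint64]bool, want)
	for len(seen) < want {
		seen[<-ch] = true // each received index is new, per notifyIndex's seenIndex check
	}
	return seen
}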
@@ -169,7 +170,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
@@ -199,6 +200,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
// Start a blockchain service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
saved := s.cfg.FinalizedStateAtStartUp
|
||||
defer s.removeStartupState()
|
||||
|
||||
if saved != nil && !saved.IsNil() {
|
||||
if err := s.StartFromSavedState(saved); err != nil {
|
||||
@@ -329,6 +331,8 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
saved.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -418,7 +422,7 @@ func (s *Service) startFromExecutionChain() error {
|
||||
log.Error("event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
}
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
|
||||
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
|
||||
s.onExecutionChainStart(s.ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
@@ -503,7 +507,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
}
|
||||
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
|
||||
return fmt.Errorf("could not load genesis block: %v", err)
|
||||
return fmt.Errorf("could not load genesis block: %w", err)
|
||||
}
|
||||
genesisBlkRoot, err := genesisBlk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -550,6 +554,10 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
|
||||
return s.cfg.BeaconDB.HasBlock(ctx, root)
|
||||
}
|
||||
|
||||
func (s *Service) removeStartupState() {
|
||||
s.cfg.FinalizedStateAtStartUp = nil
|
||||
}
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
currentTime := prysmTime.Now()
|
||||
if currentTime.After(genesisTime) {
|
||||
|
||||
@@ -8,9 +8,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
@@ -29,6 +30,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -80,7 +82,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
depositCache, err := depositcache.New()
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
fc := doublylinkedtree.New()
|
||||
@@ -95,7 +97,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
WithAttestationService(attService),
|
||||
@@ -203,7 +205,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
genState, err = blocks.ProcessPreGenesisDeposits(ctx, genState, deposits)
|
||||
genState, err = altair.ProcessPreGenesisDeposits(ctx, genState, deposits)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, ðpb.Eth1Data{DepositRoot: hashTreeRoot[:], BlockHash: make([]byte, 32)})
|
||||
@@ -499,6 +501,66 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
|
||||
require.Equal(t, true, op)
|
||||
}
|
||||
|
||||
func TestStartFromSavedState_ValidatorIndexCacheUpdated(t *testing.T) {
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EnableStartOptimistic: true,
|
||||
EIP6110ValidatorIndexCache: true,
|
||||
})
|
||||
defer resetFn()
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
headBlock.Block.Slot = finalizedSlot
|
||||
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
hexKey := "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
key, err := hexutil.Decode(hexKey)
|
||||
require.NoError(t, err)
|
||||
hexKey2 := "0x42247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
key2, err := hexutil.Decode(hexKey2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetValidators([]*ethpb.Validator{
|
||||
{
|
||||
PublicKey: key,
|
||||
WithdrawalCredentials: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
{
|
||||
PublicKey: key2,
|
||||
WithdrawalCredentials: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
}))
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
require.NoError(t, c.StartFromSavedState(headState))
|
||||
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
index2, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key2))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(1), index2) // second index
|
||||
}
|
||||
|
||||
// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
|
||||
// call SetGenesis.
|
||||
type MockClockSetter struct {
|
||||
@@ -518,7 +580,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -8,16 +8,18 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -44,12 +46,17 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
type mockAccesser struct {
|
||||
mockBroadcaster
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
|
||||
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
@@ -64,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
@@ -79,7 +91,7 @@ type testServiceRequirements struct {
|
||||
attPool attestations.Pool
|
||||
attSrv *attestations.Service
|
||||
blsPool *blstoexec.Pool
|
||||
dc *depositcache.DepositCache
|
||||
dc *depositsnapshot.Cache
|
||||
}
|
||||
|
||||
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
|
||||
@@ -94,7 +106,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
|
||||
require.NoError(t, err)
|
||||
blsPool := blstoexec.NewPool()
|
||||
dc, err := depositcache.New()
|
||||
dc, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
req := &testServiceRequirements{
|
||||
ctx: ctx,
|
||||
@@ -120,6 +132,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithSyncChecker(mock.MockChecker{}),
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -414,8 +414,8 @@ func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
|
||||
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
|
||||
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a ethpb.Att) error {
|
||||
if !bytes.Equal(a.GetData().BeaconBlockRoot, a.GetData().Target.Root) {
|
||||
return errors.New("LMD and FFG miss matched")
|
||||
}
|
||||
return nil
|
||||
@@ -495,7 +495,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing mocks the same method in the chain service.
|
||||
func (*ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
|
||||
func (*ChainService) ReceiveAttesterSlashing(context.Context, ethpb.AttSlashing) {}
|
||||
|
||||
// IsFinalized mocks the same method in the chain service.
|
||||
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
|
||||
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
@@ -2,7 +2,6 @@ package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/builder"
|
||||
@@ -55,13 +54,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
|
||||
}
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Capella:
|
||||
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
|
||||
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
|
||||
}
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Deneb:
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
|
||||
}
|
||||
|
||||
4
beacon-chain/cache/BUILD.bazel
vendored
@@ -6,7 +6,9 @@ go_library(
|
||||
"active_balance.go",
|
||||
"active_balance_disabled.go", # keep
|
||||
"attestation_data.go",
|
||||
"balance_cache_key.go",
|
||||
"checkpoint_state.go",
|
||||
"column_subnet_ids.go",
|
||||
"committee.go",
|
||||
"committee_disabled.go", # keep
|
||||
"committees.go",
|
||||
@@ -70,6 +72,7 @@ go_test(
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"payload_id_test.go",
|
||||
"private_access_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"registration_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
@@ -93,6 +96,7 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
|
||||
37
beacon-chain/cache/active_balance.go
vendored
@@ -3,17 +3,13 @@
package cache

import (
    "encoding/binary"
    "sync"

    lru "github.com/hashicorp/golang-lru"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

const (
@@ -86,36 +82,3 @@ func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
    balanceCacheHit.Inc()
    return value.(uint64), nil
}

// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
    epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
    if err != nil {
        // impossible condition due to early division
        return "", errors.Errorf("start slot calculation overflows: %v", err)
    }
    prevSlot := primitives.Slot(0)
    if epochStartSlot > 1 {
        prevSlot = epochStartSlot - 1
    }
    r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
    if err != nil {
        // impossible condition because index is always constrained within state
        return "", err
    }

    // Mix in current epoch
    b := make([]byte, 8)
    binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
    key := append(r, b...)

    // Mix in validator count
    b = make([]byte, 8)
    binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
    key = append(key, b...)

    return string(key), nil
}
31  beacon-chain/cache/active_balance_test.go  vendored
@@ -1,12 +1,13 @@
//go:build !fuzz

- package cache
+ package cache_test

import (
    "encoding/binary"
    "math"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -27,33 +28,33 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
    st, err := state_native.InitializeFromProtoPhase0(raw)
    require.NoError(t, err)

- cache := NewEffectiveBalanceCache()
- _, err = cache.Get(st)
- require.ErrorContains(t, ErrNotFound.Error(), err)
+ cc := cache.NewEffectiveBalanceCache()
+ _, err = cc.Get(st)
+ require.ErrorContains(t, cache.ErrNotFound.Error(), err)

    b := uint64(100)
- require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
- cachedB, err := cache.Get(st)
+ require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
+ cachedB, err := cc.Get(st)
    require.NoError(t, err)
    require.Equal(t, b, cachedB)

    require.NoError(t, st.SetSlot(1000))
- _, err = cache.Get(st)
- require.ErrorContains(t, ErrNotFound.Error(), err)
+ _, err = cc.Get(st)
+ require.ErrorContains(t, cache.ErrNotFound.Error(), err)

    b = uint64(200)
- require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
- cachedB, err = cache.Get(st)
+ require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
+ cachedB, err = cc.Get(st)
    require.NoError(t, err)
    require.Equal(t, b, cachedB)

    require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
- _, err = cache.Get(st)
- require.ErrorContains(t, ErrNotFound.Error(), err)
+ _, err = cc.Get(st)
+ require.ErrorContains(t, cache.ErrNotFound.Error(), err)

    b = uint64(300)
- require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
- cachedB, err = cache.Get(st)
+ require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
+ cachedB, err = cc.Get(st)
    require.NoError(t, err)
    require.Equal(t, b, cachedB)
}
@@ -72,6 +73,6 @@ func TestBalanceCache_BalanceKey(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, st.SetSlot(primitives.Slot(math.MaxUint64)))

- _, err = balanceCacheKey(st)
+ _, err = cache.BalanceCacheKey(st)
    require.NoError(t, err)
}
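These tests move from package cache to the external cache_test package and now reach unexported internals through exported test-only helpers such as cache.BalanceCacheKey and cache.MaxCheckpointStateSize (presumably provided by the new private_access_test.go). The contents of that file are not shown in this diff; the following self-contained toy, with made-up names, sketches the general pattern of an in-package test file that re-exports unexported identifiers for an external test package.

    // counter/counter.go
    package counter

    type Counter struct{ n int }

    func (c *Counter) Inc() { c.n++ }

    // value is deliberately unexported in production code.
    func (c *Counter) value() int { return c.n }

    // counter/export_test.go (compiled only with the package's tests)
    package counter

    // Value re-exports the unexported accessor for external test packages.
    func (c *Counter) Value() int { return c.value() }

    // counter/counter_test.go (external test package)
    package counter_test

    import (
        "testing"

        "example.com/counter"
    )

    func TestCounter(t *testing.T) {
        c := &counter.Counter{}
        c.Inc()
        if c.Value() != 1 {
            t.Fatalf("want 1, got %d", c.Value())
        }
    }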
43  beacon-chain/cache/balance_cache_key.go  vendored  Normal file
@@ -0,0 +1,43 @@
package cache

import (
    "encoding/binary"
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
    epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
    if err != nil {
        // impossible condition due to early division
        return "", fmt.Errorf("start slot calculation overflows: %w", err)
    }
    prevSlot := primitives.Slot(0)
    if epochStartSlot > 1 {
        prevSlot = epochStartSlot - 1
    }
    r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
    if err != nil {
        // impossible condition because index is always constrained within state
        return "", err
    }

    // Mix in current epoch
    b := make([]byte, 8)
    binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
    key := append(r, b...)

    // Mix in validator count
    b = make([]byte, 8)
    binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
    key = append(key, b...)

    return string(key), nil
}
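The key layout described in the comment above is a 32-byte block root followed by the little-endian epoch and the little-endian validator count, giving a 48-byte string key. A small, self-contained illustration with made-up values:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        root := make([]byte, 32) // stand-in for the block root at epoch_start_slot - 1
        epoch := uint64(12345)
        validatorCount := uint64(500000)

        b := make([]byte, 8)
        binary.LittleEndian.PutUint64(b, epoch)
        key := append(root, b...)

        b = make([]byte, 8)
        binary.LittleEndian.PutUint64(b, validatorCount)
        key = append(key, b...)

        fmt.Println(len(key)) // 48
    }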
11  beacon-chain/cache/checkpoint_state_test.go  vendored
@@ -1,8 +1,9 @@
- package cache
+ package cache_test

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"
@@ -15,7 +16,7 @@ import (
)

func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
- cache := NewCheckpointStateCache()
+ cache := cache.NewCheckpointStateCache()

    cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
    st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -58,16 +59,16 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
}

func TestCheckpointStateCache_MaxSize(t *testing.T) {
- c := NewCheckpointStateCache()
+ c := cache.NewCheckpointStateCache()
    st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: 0,
    })
    require.NoError(t, err)

- for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
+ for i := uint64(0); i < uint64(cache.MaxCheckpointStateSize()+100); i++ {
        require.NoError(t, st.SetSlot(primitives.Slot(i)))
        require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: primitives.Epoch(i), Root: make([]byte, 32)}, st))
    }

- assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
+ assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
}
65  beacon-chain/cache/column_subnet_ids.go  vendored  Normal file
@@ -0,0 +1,65 @@
package cache

import (
    "sync"
    "time"

    "github.com/patrickmn/go-cache"
    "github.com/prysmaticlabs/prysm/v5/config/params"
)

type columnSubnetIDs struct {
    colSubCache *cache.Cache
    colSubLock sync.RWMutex
}

// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()

const columnKey = "columns"

func newColumnSubnetIDs() *columnSubnetIDs {
    epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
    // Set the default duration of a column subnet subscription as the column expiry period.
    subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
    persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
    return &columnSubnetIDs{colSubCache: persistentCache}
}

// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
    s.colSubLock.RLock()
    defer s.colSubLock.RUnlock()

    id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
    if !ok {
        return nil, false, time.Time{}
    }
    // Retrieve indices from the cache.
    idxs, ok := id.([]uint64)
    if !ok {
        return nil, false, time.Time{}
    }

    return idxs, ok, duration
}

// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
    s.colSubLock.Lock()
    defer s.colSubLock.Unlock()

    s.colSubCache.Set(columnKey, colIdx, 0)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing, in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
    // Clear the cache.
    s.colSubLock.Lock()
    defer s.colSubLock.Unlock()

    s.colSubCache.Flush()
}
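A minimal usage sketch of the new ColumnSubnetIDs singleton shown above, assuming the beacon-chain/cache import path and the default beacon config; the subnet indices are made up.

    package main

    import (
        "fmt"

        "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    )

    func main() {
        // Record the column subnets this node participates in.
        cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{3, 17, 42})

        // Later, read them back together with the subscription expiry.
        ids, ok, expiry := cache.ColumnSubnetIDs.GetColumnSubnets()
        if !ok {
            fmt.Println("no column subnets recorded")
            return
        }
        fmt.Println("subnets:", ids, "valid until:", expiry)
    }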
34  beacon-chain/cache/committee.go  vendored
@@ -17,12 +17,16 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/container/slice"
    mathutil "github.com/prysmaticlabs/prysm/v5/math"
    log "github.com/sirupsen/logrus"
)

const (
    // maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
    // Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
- maxCommitteesCacheSize = int(32)
+ maxCommitteesCacheSize = int(4)
    // expandedCommitteeCacheSize defines the expanded size of the committee cache in the event we
    // do not have finality to deal with long forks better.
    expandedCommitteeCacheSize = int(32)
)

var (
@@ -43,6 +47,7 @@ type CommitteeCache struct {
    CommitteeCache *lru.Cache
    lock sync.RWMutex
    inProgress map[string]bool
    size int
}

// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
@@ -67,6 +72,33 @@ func (c *CommitteeCache) Clear() {
    defer c.lock.Unlock()
    c.CommitteeCache = lruwrpr.New(maxCommitteesCacheSize)
    c.inProgress = make(map[string]bool)
    c.size = maxCommitteesCacheSize
}

// ExpandCommitteeCache expands the size of the committee cache.
func (c *CommitteeCache) ExpandCommitteeCache() {
    c.lock.Lock()
    defer c.lock.Unlock()

    if c.size == expandedCommitteeCacheSize {
        return
    }
    c.CommitteeCache.Resize(expandedCommitteeCacheSize)
    c.size = expandedCommitteeCacheSize
    log.Warnf("Expanding committee cache size from %d to %d", maxCommitteesCacheSize, expandedCommitteeCacheSize)
}

// CompressCommitteeCache compresses the size of the committee cache.
func (c *CommitteeCache) CompressCommitteeCache() {
    c.lock.Lock()
    defer c.lock.Unlock()

    if c.size == maxCommitteesCacheSize {
        return
    }
    c.CommitteeCache.Resize(maxCommitteesCacheSize)
    c.size = maxCommitteesCacheSize
    log.Warnf("Reducing committee cache size from %d to %d", expandedCommitteeCacheSize, maxCommitteesCacheSize)
}

// Committee fetches the shuffled indices by slot and committee index. Every list of indices
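The new ExpandCommitteeCache and CompressCommitteeCache methods let a caller trade memory for resilience when finality lags. A hedged usage sketch follows: it assumes the package's existing NewCommitteesCache constructor, and the lag threshold and call sites are illustrative, not taken from the diff.

    package main

    import (
        "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
        "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    )

    // adjustCommitteeCache grows the shuffled-committee cache while finality is
    // lagging (long forks need more retained shufflings) and shrinks it back once
    // finality recovers. The epoch threshold below is an assumption.
    func adjustCommitteeCache(c *cache.CommitteeCache, currentEpoch, finalizedEpoch primitives.Epoch) {
        const maxTolerableLag = 4 // assumption, not from the diff
        if currentEpoch-finalizedEpoch > maxTolerableLag {
            c.ExpandCommitteeCache()
            return
        }
        c.CompressCommitteeCache()
    }

    func main() {
        c := cache.NewCommitteesCache()
        adjustCommitteeCache(c, 100, 90)  // lagging finality: expand
        adjustCommitteeCache(c, 101, 100) // finality caught up: compress
    }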
8  beacon-chain/cache/committee_disabled.go  vendored
@@ -74,3 +74,11 @@ func (c *FakeCommitteeCache) MarkNotInProgress(seed [32]byte) error {
func (c *FakeCommitteeCache) Clear() {
    return
}

func (c *FakeCommitteeCache) ExpandCommitteeCache() {
    return
}

func (c *FakeCommitteeCache) CompressCommitteeCache() {
    return
}
50  beacon-chain/cache/depositcache/BUILD.bazel  vendored
@@ -1,50 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "deposits_cache.go",
        "log.go",
        "pending_deposits.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing/spectest:__subpackages__",
    ],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "deposits_cache_test.go",
        "pending_deposits_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//config/params:go_default_library",
        "//container/trie:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
327  beacon-chain/cache/depositcache/deposits_cache.go  vendored
@@ -1,327 +0,0 @@
// Package depositcache is the source of validator deposits maintained
// in-memory by the beacon node – deposits processed from the
// eth1 powchain are then stored in this cache to be accessed by
// any other service during a beacon node's runtime.
package depositcache

import (
    "context"
    "encoding/hex"
    "math/big"
    "sort"
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/container/trie"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

var (
    historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
        Name: "beacondb_all_deposits",
        Help: "The number of total deposits in the beaconDB in-memory database",
    })
)

// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
    deposits *trie.SparseMerkleTrie
    merkleTrieIndex int64
}

// DepositCache stores all in-memory deposit objects. This
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
    // Beacon chain deposits in memory.
    pendingDeposits []*ethpb.DepositContainer
    deposits []*ethpb.DepositContainer
    finalizedDeposits FinalizedDeposits
    depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
    depositsLock sync.RWMutex
}

// New instantiates a new deposit cache
func New() (*DepositCache, error) {
    finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
    if err != nil {
        return nil, err
    }

    // finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
    // Inserting the first item into the trie will set the value of the index to 0.
    return &DepositCache{
        pendingDeposits: []*ethpb.DepositContainer{},
        deposits: []*ethpb.DepositContainer{},
        depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
        finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
    }, nil
}

// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
    defer span.End()
    if d == nil {
        log.WithFields(logrus.Fields{
            "block": blockNum,
            "deposit": d,
            "index": index,
            "deposit root": hex.EncodeToString(depositRoot[:]),
        }).Warn("Ignoring nil deposit insertion")
        return errors.New("nil deposit inserted into the cache")
    }
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

    if int(index) != len(dc.deposits) {
        return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
    }
    // Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
    heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
    depCtr := &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
    newDeposits := append(
        []*ethpb.DepositContainer{depCtr},
        dc.deposits[heightIdx:]...)
    dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
    // Append the deposit to our map, in the event no deposits
    // exist for the pubkey, it is simply added to the map.
    pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
    dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
    historicalDepositsCount.Inc()
    return nil
}

// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
    defer span.End()
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

    sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
    dc.deposits = ctrs
    for _, c := range ctrs {
        // Use a new value, as the reference
        // of c changes in the next iteration.
        newPtr := c
        pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
        dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
    }
    historicalDepositsCount.Add(float64(len(ctrs)))
}

// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
    eth1DepositIndex int64, _ common.Hash, _ uint64) error {
    _, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
    defer span.End()
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

    depositTrie := dc.finalizedDeposits.Deposits()
    insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)

    // Don't insert into finalized trie if there is no deposit to
    // insert.
    if len(dc.deposits) == 0 {
        return nil
    }
    // In the event we have less deposits than we need to
    // finalize we finalize till the index on which we do have it.
    if len(dc.deposits) <= int(eth1DepositIndex) {
        eth1DepositIndex = int64(len(dc.deposits)) - 1
    }
    // If we finalize to some lower deposit index, we
    // ignore it.
    if int(eth1DepositIndex) < insertIndex {
        return nil
    }
    for _, d := range dc.deposits {
        if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
            continue
        }
        if d.Index > eth1DepositIndex {
            break
        }
        depHash, err := d.Deposit.Data.HashTreeRoot()
        if err != nil {
            return errors.Wrap(err, "could not hash deposit data")
        }
        if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
            return errors.Wrap(err, "could not insert deposit hash")
        }
        insertIndex++
    }
    tree, ok := depositTrie.(*trie.SparseMerkleTrie)
    if !ok {
        return errors.New("not a sparse merkle tree")
    }
    dc.finalizedDeposits = FinalizedDeposits{
        deposits: tree,
        merkleTrieIndex: eth1DepositIndex,
    }
    return nil
}

// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
    _, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    // Make a shallow copy of the deposits and return that. This way, the
    // caller can safely iterate over the returned list of deposits without
    // the possibility of new deposits showing up. If we were to return the
    // list without a copy, when a new deposit is added to the cache, it
    // would also be present in the returned value. This could result in a
    // race condition if the list is being iterated over.
    //
    // It's not necessary to make a deep copy of this list because the
    // deposits in the cache should never be modified. It is still possible
    // for the caller to modify one of the underlying deposits and modify
    // the cache, but that's not a race condition. Also, a deep copy would
    // take too long and use too much memory.
    deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
    copy(deposits, dc.deposits)
    return deposits
}

// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    return dc.allDeposits(untilBlk)
}

func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
    var deposits []*ethpb.Deposit
    for _, ctnr := range dc.deposits {
        if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
            deposits = append(deposits, ctnr.Deposit)
        }
    }
    return deposits
}

// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
    _, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()
    heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
    // send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
    // deposit.
    if heightIdx == 0 {
        return 0, [32]byte{}
    }
    return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
}

// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
    _, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    var deposit *ethpb.Deposit
    var blockNum *big.Int
    deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
    if !ok || len(deps) == 0 {
        return deposit, blockNum
    }
    // We always return the first deposit if a particular
    // validator key has multiple deposits assigned to
    // it.
    deposit = deps[0].Deposit
    blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
    return deposit, blockNum
}

// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
    _, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    return &FinalizedDeposits{
        deposits: dc.finalizedDeposits.deposits.Copy(),
        merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
    }, nil
}

// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
    _, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
    defer span.End()
    dc.depositsLock.RLock()
    defer dc.depositsLock.RUnlock()

    if dc.finalizedDeposits.Deposits() == nil {
        return dc.allDeposits(untilBlk)
    }

    var deposits []*ethpb.Deposit
    for _, d := range dc.deposits {
        if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
            deposits = append(deposits, d.Deposit)
        }
    }

    return deposits
}

// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
    _, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
    defer span.End()
    dc.depositsLock.Lock()
    defer dc.depositsLock.Unlock()

    if untilDepositIndex >= int64(len(dc.deposits)) {
        untilDepositIndex = int64(len(dc.deposits) - 1)
    }

    for i := untilDepositIndex; i >= 0; i-- {
        // Finding a nil proof means that all proofs up to this deposit have been already pruned.
        if dc.deposits[i].Deposit.Proof == nil {
            break
        }
        dc.deposits[i].Deposit.Proof = nil
    }

    return nil
}

// Deposits returns the cached internal deposit tree.
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
    if fd.deposits != nil {
        return fd.deposits
    }
    return nil
}

// MerkleTrieIndex represents the last finalized index in
// the finalized deposit container.
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
    return fd.merkleTrieIndex
}
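The removed InsertDeposit kept the deposit slice sorted on insertion with sort.Search so that reads never have to re-sort. A small, self-contained sketch of that sorted-insert pattern on plain int64 indices (the slice values are made up, only the technique mirrors the deleted code):

    package main

    import (
        "fmt"
        "sort"
    )

    // insertSorted keeps xs ordered by inserting v at the position found with
    // sort.Search, then shifting the tail right by one slot.
    func insertSorted(xs []int64, v int64) []int64 {
        i := sort.Search(len(xs), func(j int) bool { return xs[j] >= v })
        xs = append(xs, 0)     // grow by one
        copy(xs[i+1:], xs[i:]) // shift the tail right
        xs[i] = v
        return xs
    }

    func main() {
        deposits := []int64{0, 1, 2, 4, 5}
        fmt.Println(insertSorted(deposits, 3)) // [0 1 2 3 4 5]
    }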
1074  beacon-chain/cache/depositcache/deposits_cache_test.go  vendored
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.