Mirror of https://github.com/OffchainLabs/prysm.git
Synced 2026-01-09 21:38:05 -05:00

Compare commits: move-netwo...hdiff-fuzz
210 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 60de0e5097 | |
| | 603aaa8f86 | |
| | 8c5f7cf1fa | |
| | 15c610fdbd | |
| | c62395960a | |
| | 18efd620dc | |
| | 6139d58fa5 | |
| | 0ea5e2cf9d | |
| | 29fe707143 | |
| | d68196822b | |
| | 924fe4de98 | |
| | fe9dd255c7 | |
| | 83d75bcb78 | |
| | ba5f7361ad | |
| | 8fa956036a | |
| | 58ce1c25f5 | |
| | 98532a2df3 | |
| | 08be6fde92 | |
| | 4585cdc932 | |
| | aa47435c91 | |
| | 80eba4e6dd | |
| | 606294e17f | |
| | 977e923692 | |
| | 013b6b1d60 | |
| | 66ff6f70b8 | |
| | 5b1a9fb077 | |
| | 35bc9b1a0f | |
| | 02dca85251 | |
| | 39b2163702 | |
| | d67ee62efa | |
| | 9f9401e615 | |
| | 6b89d839f6 | |
| | d826a3c7fe | |
| | 900f162467 | |
| | 5266d34a22 | |
| | c941e47b7e | |
| | 54991bbc52 | |
| | 7e32bbc199 | |
| | a1be3d68cd | |
| | 88af3f90a5 | |
| | 600169a53b | |
| | a5e4fccb47 | |
| | e589588f47 | |
| | 41884d8d9d | |
| | db074cbf12 | |
| | 360e89767f | |
| | 238d5c07df | |
| | 2292d955a3 | |
| | 76bc30e8ba | |
| | a5c7c6da06 | |
| | 4b09dd4aa5 | |
| | 1dab5a9f8a | |
| | d681232fe6 | |
| | 1d24f89c96 | |
| | 967193e6a2 | |
| | 9e40551852 | |
| | a8cab58f7e | |
| | df86f57507 | |
| | 9b0a3e9632 | |
| | 5e079aa62c | |
| | 5c68ec5c39 | |
| | 5410232bef | |
| | 3f5c4df7e0 | |
| | 5c348dff59 | |
| | 8136ff7c3a | |
| | f690af81fa | |
| | 029b896c79 | |
| | e1117a7de2 | |
| | 39b2a02f66 | |
| | 4e8a710b64 | |
| | 7191a5bcdf | |
| | d335a52c49 | |
| | c7401f5e75 | |
| | 0057cc57b5 | |
| | b1dc5e485d | |
| | f035da6fc5 | |
| | 854f4bc9a3 | |
| | 1933adedbf | |
| | 278b796e43 | |
| | 8e52d0c3c6 | |
| | d339e09509 | |
| | 8ec460223c | |
| | 349d9d2fd0 | |
| | e0aecb9c32 | |
| | 4a1ab70929 | |
| | 240cd1d058 | |
| | 26d8b6b786 | |
| | 92c359456e | |
| | d48ed44c4c | |
| | dbab624f3d | |
| | 83e9cc3dbb | |
| | ee03c7cce2 | |
| | c5135f6995 | |
| | 29aedac113 | |
| | 08fb3812b7 | |
| | 07738dd9a4 | |
| | 53df29b07f | |
| | 00cf1f2507 | |
| | 6528fb9cea | |
| | 5021131811 | |
| | 26cec9d9c7 | |
| | 4ed90a02ef | |
| | 7d528c75bb | |
| | e7b2953d5a | |
| | acf35e849e | |
| | c826d334a1 | |
| | eace128ee9 | |
| | 9161b80a32 | |
| | 009a1507a1 | |
| | fa71a6e117 | |
| | 3da40ecd9c | |
| | f7f992c256 | |
| | 09565e0c3a | |
| | 05a3736310 | |
| | 84c8653a52 | |
| | 921ff23c6b | |
| | 87235fb384 | |
| | 2ec5914b4a | |
| | fe000e5629 | |
| | 447a3d8add | |
| | b00aaef202 | |
| | 0f6070a866 | |
| | 2a09c9f681 | |
| | 9c6ccd67c1 | |
| | 36e5d4926b | |
| | 17b7d3ff12 | |
| | fb2bceece8 | |
| | d012ab653c | |
| | 46e7555c42 | |
| | 181df3995e | |
| | 149e220b61 | |
| | ae4b982a6c | |
| | f330021785 | |
| | bd6b4ecd5b | |
| | d7d8764a91 | |
| | 9b7f91d947 | |
| | 57e27199bd | |
| | 11ca766ed6 | |
| | cd6cc76d58 | |
| | fc4a1469f0 | |
| | f3dc4c283e | |
| | 6ddf271688 | |
| | af7afba26e | |
| | b740a4ff83 | |
| | 385c2224e8 | |
| | 04b39d1a4d | |
| | 4c40caf7fd | |
| | bc209cadab | |
| | 856742ff68 | |
| | abe16a9cb4 | |
| | 77958022e7 | |
| | c21fae239f | |
| | deb3ba7f21 | |
| | f288a3c0e1 | |
| | a4ca6355d0 | |
| | cd0821d026 | |
| | 8b53887891 | |
| | 8623a144d9 | |
| | f3314d2d24 | |
| | bcd65e7a4d | |
| | dce89a1627 | |
| | 09485c2062 | |
| | 9e014da0b9 | |
| | d8fedacc26 | |
| | 3def16caaa | |
| | 4e5bfa9760 | |
| | 70ac53f991 | |
| | 6f5ff03b42 | |
| | 499d27b6ae | |
| | 56e8881bc1 | |
| | 78f8411ad2 | |
| | 83943b5dd8 | |
| | bc7e4f7751 | |
| | 02f8726e46 | |
| | 16b567f6af | |
| | 5c1d827335 | |
| | 68d7df0e4f | |
| | 288b33750d | |
| | f4f48d6372 | |
| | f2d57f0b5f | |
| | 7025e50a6c | |
| | 961ea05454 | |
| | da5525096c | |
| | 2d2507b907 | |
| | a701f07f3a | |
| | f4bbe5ca40 | |
| | 4be8de2476 | |
| | fac509a3e6 | |
| | b1ac8209b2 | |
| | 74c9586c66 | |
| | f0ad3dfaeb | |
| | 2540196747 | |
| | f133751cce | |
| | bddcc158e4 | |
| | bc7664321b | |
| | 97f416b3a7 | |
| | 1c1e0f38bb | |
| | 121914d0d7 | |
| | e8625cd89d | |
| | 667aaf1564 | |
| | e020907d2a | |
| | 9927cea35a | |
| | d4233471d2 | |
| | d63ae69920 | |
| | b9fd32dfff | |
| | 559d02bf4d | |
| | 62fec4d1f3 | |
| | 6a13ba9125 | |
| | 2a3876427f | |
| | f8b88db1a4 | |
.bazelrc (1 change)
@@ -34,6 +34,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
|
||||
build:release --compilation_mode=opt
|
||||
build:release --stamp
|
||||
build:release --define pgo_enabled=1
|
||||
build:release --strip=always
|
||||
|
||||
# Build binary with cgo symbolizer for debugging / profiling.
|
||||
build:cgo_symbolizer --copt=-g
|
||||
|
||||
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changes)
@@ -1,6 +1,6 @@
|
||||
name: 🐞 Bug report
|
||||
description: Report a bug or problem with running Prysm
|
||||
labels: ["Bug"]
|
||||
type: "Bug"
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
|
||||
.github/actions/gomodtidy/Dockerfile (vendored, 2 changes)
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.24-alpine
|
||||
FROM golang:1.25.1-alpine
|
||||
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
|
||||
.github/workflows/check-specrefs.yml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
name: Check Spec References
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
check-specrefs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Check version consistency
|
||||
run: |
|
||||
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
|
||||
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
|
||||
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
|
||||
echo "Version mismatch between WORKSPACE and ethspecify"
|
||||
echo " WORKSPACE: $WORKSPACE_VERSION"
|
||||
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
|
||||
exit 1
|
||||
else
|
||||
echo "Versions match: $WORKSPACE_VERSION"
|
||||
fi
|
||||
|
||||
- name: Install ethspecify
|
||||
run: python3 -mpip install ethspecify
|
||||
|
||||
- name: Update spec references
|
||||
run: ethspecify process --path=specrefs
|
||||
|
||||
- name: Check for differences
|
||||
run: |
|
||||
if ! git diff --exit-code specrefs >/dev/null; then
|
||||
echo "Spec references are out-of-date!"
|
||||
echo ""
|
||||
git --no-pager diff specrefs
|
||||
exit 1
|
||||
else
|
||||
echo "Spec references are up-to-date!"
|
||||
fi
|
||||
|
||||
- name: Check spec references
|
||||
run: ethspecify check --path=specrefs
|
||||
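The first step of this workflow asserts that the consensus-spec version pinned in WORKSPACE (bumped to v1.6.0-beta.0 later in this diff) matches the version declared in specrefs/.ethspecify.yml, and fails the build on a mismatch before running ethspecify. A rough local equivalent of that shell check, sketched in Go; this program is not part of the repository and the regex patterns simply mirror the workflow's grep/sed:

```go
// Rough local equivalent of the workflow's version-consistency step:
// extract consensus_spec_version from WORKSPACE and version: from
// specrefs/.ethspecify.yml, then exit non-zero on mismatch.
package main

import (
	"fmt"
	"os"
	"regexp"
)

func extract(path, pattern string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	m := regexp.MustCompile(pattern).FindSubmatch(data)
	if len(m) < 2 {
		return "", fmt.Errorf("pattern %q not found in %s", pattern, path)
	}
	return string(m[1]), nil
}

func main() {
	ws, err := extract("WORKSPACE", `consensus_spec_version = "([^"]+)"`)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	spec, err := extract("specrefs/.ethspecify.yml", `(?m)^version:\s*(\S+)`)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if ws != spec {
		fmt.Printf("Version mismatch between WORKSPACE (%s) and ethspecify (%s)\n", ws, spec)
		os.Exit(1)
	}
	fmt.Println("Versions match:", ws)
}
```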
.github/workflows/fuzz.yml (vendored, 4 changes)
@@ -16,7 +16,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.23.5'
|
||||
go-version: '1.25.1'
|
||||
- id: list
|
||||
uses: shogo82148/actions-go-fuzz/list@v0
|
||||
with:
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.23.5'
|
||||
go-version: '1.25.1'
|
||||
- uses: shogo82148/actions-go-fuzz/run@v0
|
||||
with:
|
||||
packages: ${{ matrix.package }}
|
||||
|
||||
.github/workflows/go.yml (vendored, 22 changes)
@@ -31,7 +31,7 @@ jobs:
|
||||
- name: Set up Go 1.24
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24.0'
|
||||
go-version: '1.25.1'
|
||||
- name: Run Gosec Security Scanner
|
||||
run: | # https://github.com/securego/gosec/issues/469
|
||||
export PATH=$PATH:$(go env GOPATH)/bin
|
||||
@@ -44,27 +44,27 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go 1.24
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24.0'
|
||||
id: go
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go 1.25.1
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.25.1'
|
||||
|
||||
- name: Golangci-lint
|
||||
uses: golangci/golangci-lint-action@v5
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
version: v1.64.5
|
||||
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
|
||||
version: v2.4
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go 1.x
|
||||
- name: Set up Go 1.25.1
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24.0'
|
||||
go-version: '1.25.1'
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
|
||||
.golangci.yml (121 changes)
@@ -1,90 +1,41 @@
|
||||
version: "2"
|
||||
run:
|
||||
timeout: 10m
|
||||
go: '1.23.5'
|
||||
|
||||
issues:
|
||||
exclude-files:
|
||||
- validator/web/site_data.go
|
||||
- .*_test.go
|
||||
exclude-dirs:
|
||||
- proto
|
||||
- tools/analyzers
|
||||
|
||||
go: 1.23.5
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
# Deprecated linters:
|
||||
enable:
|
||||
- errcheck
|
||||
- ineffassign
|
||||
- govet
|
||||
|
||||
# Disabled for now:
|
||||
- asasalint
|
||||
- bodyclose
|
||||
- containedctx
|
||||
- contextcheck
|
||||
- cyclop
|
||||
- depguard
|
||||
- dogsled
|
||||
- dupl
|
||||
- durationcheck
|
||||
- errname
|
||||
- err113
|
||||
- exhaustive
|
||||
- exhaustruct
|
||||
- forbidigo
|
||||
- forcetypeassert
|
||||
- funlen
|
||||
- gci
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- godot
|
||||
- godox
|
||||
- gofumpt
|
||||
- gomoddirectives
|
||||
- gosec
|
||||
- inamedparam
|
||||
- interfacebloat
|
||||
- intrange
|
||||
- ireturn
|
||||
- lll
|
||||
- maintidx
|
||||
- makezero
|
||||
- mnd
|
||||
- musttag
|
||||
- nakedret
|
||||
- nestif
|
||||
- nilnil
|
||||
- nlreturn
|
||||
- noctx
|
||||
- nolintlint
|
||||
- nonamedreturns
|
||||
- nosprintfhostport
|
||||
- perfsprint
|
||||
- prealloc
|
||||
- predeclared
|
||||
- promlinter
|
||||
- protogetter
|
||||
- recvcheck
|
||||
- revive
|
||||
- spancheck
|
||||
disable:
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
- tagalign
|
||||
- tagliatelle
|
||||
- thelper
|
||||
- unparam
|
||||
- usetesting
|
||||
- varnamelen
|
||||
- wrapcheck
|
||||
- wsl
|
||||
- unused
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- validator/web/site_data.go
|
||||
- .*_test.go
|
||||
- proto
|
||||
- tools/analyzers
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
||||
linters-settings:
|
||||
gocognit:
|
||||
# TODO: We should target for < 50
|
||||
min-complexity: 65
|
||||
|
||||
output:
|
||||
print-issued-lines: true
|
||||
sort-results: true
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- validator/web/site_data.go
|
||||
- .*_test.go
|
||||
- proto
|
||||
- tools/analyzers
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
@@ -194,6 +194,7 @@ nogo(
|
||||
"//tools/analyzers/gocognit:go_default_library",
|
||||
"//tools/analyzers/ineffassign:go_default_library",
|
||||
"//tools/analyzers/interfacechecker:go_default_library",
|
||||
"//tools/analyzers/logcapitalization:go_default_library",
|
||||
"//tools/analyzers/logruswitherror:go_default_library",
|
||||
"//tools/analyzers/maligned:go_default_library",
|
||||
"//tools/analyzers/nop:go_default_library",
|
||||
|
||||
@@ -2993,7 +2993,7 @@ There are two known issues with this release:
|
||||
|
||||
### Added
|
||||
|
||||
- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
|
||||
- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
|
||||
details.
|
||||
- Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
|
||||
- Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Prysm is a Go project with many complicated dependencies, including some C++-based libraries. There
are two parts to Prysm's dependency management: Go modules and Bazel-managed dependencies. Be sure
to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
"go build" project.
|
||||
|
||||
|
||||
WORKSPACE (64 changes)
@@ -1,7 +1,7 @@
|
||||
workspace(name = "prysm")
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "rules_pkg",
|
||||
@@ -16,8 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
|
||||
|
||||
rules_pkg_dependencies()
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "toolchains_protoc",
|
||||
sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
|
||||
@@ -160,15 +158,15 @@ oci_register_toolchains(
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
# Expose internals of go_test for custom build transitions.
|
||||
"//third_party:io_bazel_rules_go_test.patch",
|
||||
],
|
||||
strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
|
||||
sha256 = "a729c8ed2447c90fe140077689079ca0acfb7580ec41637f312d650ce9d93d96",
|
||||
urls = [
|
||||
"https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
|
||||
"https://github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -192,7 +190,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
|
||||
# A multi-arch base image
|
||||
oci_pull(
|
||||
name = "linux_debian11_multiarch_base", # Debian bullseye
|
||||
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
|
||||
digest = "sha256:55a5e011b2c4246b4c51e01fcc2b452d151e03df052e357465f0392fcd59fddf",
|
||||
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
|
||||
platforms = [
|
||||
"linux/amd64",
|
||||
@@ -210,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.24.0",
|
||||
go_version = "1.25.1",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
@@ -255,16 +253,16 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.6.0-alpha.0"
|
||||
consensus_spec_version = "v1.6.0-beta.0"
|
||||
|
||||
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
|
||||
|
||||
consensus_spec_tests(
|
||||
name = "consensus_spec_tests",
|
||||
flavors = {
|
||||
"general": "sha256-W7oKvoM0nAkyitykRxAw6kmCvjYC01IqiNJy0AmCnMM=",
|
||||
"minimal": "sha256-ig7/zxomjv6buBWMom4IxAJh3lFJ9+JnY44E7c8ZNP8=",
|
||||
"mainnet": "sha256-mjx+MkXtPhCNv4c4knLYLIkvIdpF7WTjx/ElvGPQzSo=",
|
||||
"general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
|
||||
"minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
|
||||
"mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
|
||||
},
|
||||
version = consensus_spec_version,
|
||||
)
|
||||
@@ -280,7 +278,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-u0RkIZIeGttb3sInR31mO64aBSwxALqO5SYIPlqEvPo=",
|
||||
integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
@@ -302,22 +300,6 @@ filegroup(
|
||||
url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "eth2_networks",
|
||||
build_file_content = """
|
||||
filegroup(
|
||||
name = "configs",
|
||||
srcs = glob([
|
||||
"shared/**/config.yaml",
|
||||
]),
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
|
||||
strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
|
||||
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "holesky_testnet",
|
||||
build_file_content = """
|
||||
@@ -329,9 +311,9 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
|
||||
strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
|
||||
url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz", # 2025-03-17
|
||||
integrity = "sha256-htyxg8Ln2o8eCiifFN7/hcHGZg8Ir9CPzCEx+FUnnCs=",
|
||||
strip_prefix = "holesky-8aec65f11f0c986d6b76b2eb902420635eb9b815",
|
||||
url = "https://github.com/eth-clients/holesky/archive/8aec65f11f0c986d6b76b2eb902420635eb9b815.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -361,9 +343,9 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
|
||||
strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
|
||||
url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz", # 2025-03-17
|
||||
integrity = "sha256-+UZgfvBcea0K0sbvAJZOz5ZNmxdWZYbohP38heUuc6w=",
|
||||
strip_prefix = "sepolia-f9158732adb1a2a6440613ad2232eb50e7384c4f",
|
||||
url = "https://github.com/eth-clients/sepolia/archive/f9158732adb1a2a6440613ad2232eb50e7384c4f.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -377,17 +359,17 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
|
||||
strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
|
||||
url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
|
||||
integrity = "sha256-G+4c9c/vci1OyPrQJnQCI+ZCv/E0cWN4hrHDY3i7ns0=",
|
||||
strip_prefix = "hoodi-b6ee51b2045a5e7fe3efac52534f75b080b049c6",
|
||||
url = "https://github.com/eth-clients/hoodi/archive/b6ee51b2045a5e7fe3efac52534f75b080b049c6.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "com_google_protobuf",
|
||||
sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
|
||||
strip_prefix = "protobuf-25.1",
|
||||
sha256 = "7c3ebd7aaedd86fa5dc479a0fda803f602caaf78d8aff7ce83b89e1b8ae7442a",
|
||||
strip_prefix = "protobuf-28.3",
|
||||
urls = [
|
||||
"https://github.com/protocolbuffers/protobuf/archive/v25.1.tar.gz",
|
||||
"https://github.com/protocolbuffers/protobuf/archive/v28.3.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -2,18 +2,28 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["common.go"],
|
||||
srcs = [
|
||||
"common.go",
|
||||
"header.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//consensus-types/primitives:go_default_library"],
|
||||
deps = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["common_test.go"],
|
||||
srcs = [
|
||||
"common_test.go",
|
||||
"header_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
api/apiutil/header.go (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
package apiutil
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type mediaRange struct {
|
||||
mt string // canonicalised media‑type, e.g. "application/json"
|
||||
q float64 // quality factor (0‑1)
|
||||
raw string // original string – useful for logging/debugging
|
||||
spec int // 2=exact, 1=type/*, 0=*/*
|
||||
}
|
||||
|
||||
func parseMediaRange(field string) (mediaRange, bool) {
|
||||
field = strings.TrimSpace(field)
|
||||
|
||||
mt, params, err := mime.ParseMediaType(field)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to parse header field")
|
||||
return mediaRange{}, false
|
||||
}
|
||||
|
||||
r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}
|
||||
|
||||
if qs, ok := params["q"]; ok {
|
||||
v, err := strconv.ParseFloat(qs, 64)
|
||||
if err != nil || v < 0 || v > 1 {
|
||||
log.WithField("q", qs).Debug("Invalid quality factor (0‑1)")
|
||||
return mediaRange{}, false // skip invalid entry
|
||||
}
|
||||
r.q = v
|
||||
}
|
||||
|
||||
switch {
|
||||
case mt == "*/*":
|
||||
r.spec = 0
|
||||
case strings.HasSuffix(mt, "/*"):
|
||||
r.spec = 1
|
||||
}
|
||||
return r, true
|
||||
}
|
||||
|
||||
func hasExplicitQ(r mediaRange) bool {
|
||||
return strings.Contains(strings.ToLower(r.raw), ";q=")
|
||||
}
|
||||
|
||||
// ParseAccept returns media ranges sorted by q (desc) then specificity.
|
||||
func ParseAccept(header string) []mediaRange {
|
||||
if header == "" {
|
||||
return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
|
||||
}
|
||||
|
||||
var out []mediaRange
|
||||
for _, field := range strings.Split(header, ",") {
|
||||
if r, ok := parseMediaRange(field); ok {
|
||||
out = append(out, r)
|
||||
}
|
||||
}
|
||||
|
||||
sort.SliceStable(out, func(i, j int) bool {
|
||||
ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
|
||||
if ei != ej {
|
||||
return ei // explicit beats implicit
|
||||
}
|
||||
if out[i].q != out[j].q {
|
||||
return out[i].q > out[j].q
|
||||
}
|
||||
return out[i].spec > out[j].spec
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
// Matches reports whether content type is acceptable per the header.
|
||||
func Matches(header, ct string) bool {
|
||||
for _, r := range ParseAccept(header) {
|
||||
switch {
|
||||
case r.q == 0:
|
||||
continue
|
||||
case r.mt == "*/*":
|
||||
return true
|
||||
case strings.HasSuffix(r.mt, "/*"):
|
||||
if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
|
||||
return true
|
||||
}
|
||||
case r.mt == ct:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Negotiate selects the best server type according to the header.
|
||||
// Returns the chosen type and true, or "", false when nothing matches.
|
||||
func Negotiate(header string, serverTypes []string) (string, bool) {
|
||||
for _, r := range ParseAccept(header) {
|
||||
if r.q == 0 {
|
||||
continue
|
||||
}
|
||||
for _, s := range serverTypes {
|
||||
if Matches(r.mt, s) {
|
||||
return s, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// PrimaryAcceptMatches only checks if the first accept matches
|
||||
func PrimaryAcceptMatches(header, produced string) bool {
|
||||
for _, r := range ParseAccept(header) {
|
||||
if r.q == 0 {
|
||||
continue // explicitly unacceptable – skip
|
||||
}
|
||||
return Matches(r.mt, produced)
|
||||
}
|
||||
return false
|
||||
}
|
||||
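The new helpers above implement Accept-header content negotiation: ParseAccept orders media ranges so that entries with an explicit q rank ahead of implicit ones, then by quality, then by specificity; Matches reports whether a concrete content type is acceptable; Negotiate picks the first acceptable entry from the server's preference list; and PrimaryAcceptMatches consults only the client's top-ranked range. A minimal caller sketch under those assumptions; the handler, route, and serverTypes values are illustrative and not part of this diff:

```go
// Illustrative only: an HTTP handler that selects a response encoding with
// the apiutil helpers added in this diff.
package main

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v6/api/apiutil"
)

// Server-side preference order: SSZ first, JSON as fallback.
var serverTypes = []string{"application/octet-stream", "application/json"}

func handler(w http.ResponseWriter, r *http.Request) {
	accept := r.Header.Get("Accept")

	// Pick the best type the client accepts, in the server's preference order.
	chosen, ok := apiutil.Negotiate(accept, serverTypes)
	if !ok {
		http.Error(w, "no acceptable content type", http.StatusNotAcceptable)
		return
	}

	// Optionally honor only the client's highest-ranked range for JSON.
	if apiutil.PrimaryAcceptMatches(accept, "application/json") {
		chosen = "application/json"
	}

	w.Header().Set("Content-Type", chosen)
	w.WriteHeader(http.StatusOK)
}

func main() {
	// Hypothetical route used only for this example.
	http.HandleFunc("/eth/v1/example", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```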
api/apiutil/header_test.go (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
package apiutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestParseAccept(t *testing.T) {
|
||||
type want struct {
|
||||
mt string
|
||||
q float64
|
||||
spec int
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
header string
|
||||
want []want
|
||||
}{
|
||||
{
|
||||
name: "empty header becomes */*;q=1",
|
||||
header: "",
|
||||
want: []want{{mt: "*/*", q: 1, spec: 0}},
|
||||
},
|
||||
{
|
||||
name: "quality ordering then specificity",
|
||||
header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
|
||||
want: []want{
|
||||
{mt: "application/xml", q: 0.5, spec: 2},
|
||||
{mt: "text/*", q: 0.5, spec: 1},
|
||||
{mt: "application/json", q: 0.2, spec: 2},
|
||||
{mt: "*/*", q: 0.1, spec: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid pieces are skipped",
|
||||
header: "text/plain; q=boom, application/json",
|
||||
want: []want{{mt: "application/json", q: 1, spec: 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := ParseAccept(tc.header)
|
||||
gotProjected := make([]want, len(got))
|
||||
for i, g := range got {
|
||||
gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
|
||||
}
|
||||
require.DeepEqual(t, gotProjected, tc.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatches(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
accept string
|
||||
ct string
|
||||
matches bool
|
||||
}{
|
||||
{"exact match", "application/json", "application/json", true},
|
||||
{"type wildcard", "application/*;q=0.8", "application/xml", true},
|
||||
{"global wildcard", "*/*;q=0.1", "image/png", true},
|
||||
{"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
|
||||
{"no match", "image/png", "application/json", false},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := Matches(tc.accept, tc.ct)
|
||||
require.Equal(t, tc.matches, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNegotiate(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
accept string
|
||||
serverTypes []string
|
||||
wantType string
|
||||
ok bool
|
||||
}{
|
||||
{
|
||||
name: "highest quality wins",
|
||||
accept: "application/json;q=0.8,application/xml;q=0.9",
|
||||
serverTypes: []string{"application/json", "application/xml"},
|
||||
wantType: "application/xml",
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard matches first server type",
|
||||
accept: "*/*;q=0.5",
|
||||
serverTypes: []string{"application/octet-stream", "application/json"},
|
||||
wantType: "application/octet-stream",
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "no acceptable type",
|
||||
accept: "image/png",
|
||||
serverTypes: []string{"application/json"},
|
||||
wantType: "",
|
||||
ok: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, ok := Negotiate(tc.accept, tc.serverTypes)
|
||||
require.Equal(t, tc.ok, ok)
|
||||
require.Equal(t, tc.wantType, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrimaryAcceptMatches(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
accept string
|
||||
produced string
|
||||
expect bool
|
||||
}{
|
||||
{
|
||||
name: "prefers json",
|
||||
accept: "application/json;q=0.9,application/xml",
|
||||
produced: "application/json",
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard application beats other wildcard",
|
||||
accept: "application/*;q=0.2,*/*;q=0.1",
|
||||
produced: "application/xml",
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "json wins",
|
||||
accept: "application/xml;q=0.8,application/json;q=0.9",
|
||||
produced: "application/json",
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "json loses",
|
||||
accept: "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
|
||||
produced: "application/json",
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "json wins with non q option",
|
||||
accept: "application/xml;q=0.8,image/png,application/json;q=0.9",
|
||||
produced: "application/json",
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "json not primary",
|
||||
accept: "image/png,application/json",
|
||||
produced: "application/json",
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "absent header",
|
||||
accept: "",
|
||||
produced: "text/plain",
|
||||
expect: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := PrimaryAcceptMatches(tc.accept, tc.produced)
|
||||
require.Equal(t, got, tc.expect)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,6 @@ go_library(
|
||||
"//api/server/structs:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/client"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
|
||||
return fr.ToConsensus()
|
||||
}
|
||||
|
||||
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
|
||||
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
|
||||
body, err := c.Get(ctx, getForkSchedulePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting fork schedule")
|
||||
}
|
||||
fsr := &forkScheduleResponse{}
|
||||
err = json.Unmarshal(body, fsr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ofs, err := fsr.OrderedForkSchedule()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
|
||||
}
|
||||
return ofs, nil
|
||||
}
|
||||
|
||||
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
|
||||
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
|
||||
body, err := c.Get(ctx, getConfigSpecPath)
|
||||
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
|
||||
}
|
||||
return poolResponse, nil
|
||||
}
|
||||
|
||||
type forkScheduleResponse struct {
|
||||
Data []structs.Fork
|
||||
}
|
||||
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
|
||||
ofs := make(forks.OrderedSchedule, 0)
|
||||
for _, d := range fsr.Data {
|
||||
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
|
||||
}
|
||||
vSlice, err := hexutil.Decode(d.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(vSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
|
||||
}
|
||||
version := bytesutil.ToBytes4(vSlice)
|
||||
ofs = append(ofs, forks.ForkScheduleEntry{
|
||||
Version: version,
|
||||
Epoch: primitives.Epoch(epoch),
|
||||
})
|
||||
}
|
||||
sort.Sort(ofs)
|
||||
return ofs, nil
|
||||
}
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"health.go",
|
||||
"interfaces.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon/health",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@org_uber_go_mock//gomock:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["health_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@org_uber_go_mock//gomock:go_default_library"],
|
||||
)
|
||||
@@ -1,58 +0,0 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type NodeHealthTracker struct {
|
||||
isHealthy *bool
|
||||
healthChan chan bool
|
||||
node Node
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewTracker(node Node) Tracker {
|
||||
return &NodeHealthTracker{
|
||||
node: node,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// HealthUpdates provides a read-only channel for health updates.
|
||||
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
|
||||
return n.healthChan
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
|
||||
n.RLock()
|
||||
defer n.RUnlock()
|
||||
if n.isHealthy == nil {
|
||||
return false
|
||||
}
|
||||
return *n.isHealthy
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
newStatus := n.node.IsHealthy(ctx)
|
||||
if n.isHealthy == nil {
|
||||
n.isHealthy = &newStatus
|
||||
}
|
||||
|
||||
isStatusChanged := newStatus != *n.isHealthy
|
||||
if isStatusChanged {
|
||||
// Update the health status
|
||||
n.isHealthy = &newStatus
|
||||
// Send the new status to the health channel, potentially overwriting the existing value
|
||||
select {
|
||||
case <-n.healthChan:
|
||||
n.healthChan <- newStatus
|
||||
default:
|
||||
n.healthChan <- newStatus
|
||||
}
|
||||
}
|
||||
return newStatus
|
||||
}
|
||||
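The tracker removed above pushed status changes through a buffered channel of size one and, when the status flipped, drained any stale unread value before sending, so the send never blocks and the latest status always wins (the surrounding mutex ensured a single sender at a time). A minimal standalone sketch of that pattern, not Prysm code:

```go
// Minimal sketch of the non-blocking "latest value wins" send used by the
// removed NodeHealthTracker: drain the 1-slot buffer, then send.
// Assumes a single publisher, as the original guaranteed via its mutex.
package main

import "fmt"

func publish(ch chan bool, status bool) {
	select {
	case <-ch: // drop a stale, unread status so the send below cannot block
	default:
	}
	ch <- status
}

func main() {
	updates := make(chan bool, 1)
	publish(updates, false)
	publish(updates, true) // overwrites the unread false
	fmt.Println(<-updates) // prints: true
}
```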
@@ -1,111 +0,0 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestNodeHealth_IsHealthy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
isHealthy bool
|
||||
want bool
|
||||
}{
|
||||
{"initially healthy", true, true},
|
||||
{"initially unhealthy", false, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.isHealthy,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
if got := n.IsHealthy(context.Background()); got != tt.want {
|
||||
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
initial bool // Initial health status
|
||||
newStatus bool // Status to update to
|
||||
shouldSend bool // Should a message be sent through the channel
|
||||
}{
|
||||
{"healthy to unhealthy", true, false, true},
|
||||
{"unhealthy to healthy", false, true, true},
|
||||
{"remain healthy", true, true, false},
|
||||
{"remain unhealthy", false, false, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := NewMockHealthClient(ctrl)
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.initial,
|
||||
node: client,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
|
||||
s := n.CheckHealth(context.Background())
|
||||
// Check if health status was updated
|
||||
if s != tt.newStatus {
|
||||
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
|
||||
}
|
||||
|
||||
select {
|
||||
case status := <-n.HealthUpdates():
|
||||
if !tt.shouldSend {
|
||||
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
|
||||
} else if status != tt.newStatus {
|
||||
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
|
||||
}
|
||||
default:
|
||||
if tt.shouldSend {
|
||||
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeHealth_Concurrency(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := NewMockHealthClient(ctrl)
|
||||
n := NewTracker(client)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Number of goroutines to spawn for both reading and writing
|
||||
numGoroutines := 6
|
||||
|
||||
wg.Add(numGoroutines * 2) // for readers and writers
|
||||
|
||||
// Concurrently update health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
|
||||
n.CheckHealth(context.Background())
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
|
||||
n.CheckHealth(context.Background())
|
||||
}()
|
||||
}
|
||||
|
||||
// Concurrently read health status
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = n.IsHealthy(context.Background()) // Just read the value
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait() // Wait for all goroutines to finish
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package health
|
||||
|
||||
import "context"
|
||||
|
||||
type Tracker interface {
|
||||
HealthUpdates() <-chan bool
|
||||
CheckHealth(ctx context.Context) bool
|
||||
Node
|
||||
}
|
||||
|
||||
type Node interface {
|
||||
IsHealthy(ctx context.Context) bool
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = Node(&MockHealthClient{})
|
||||
)
|
||||
|
||||
// MockHealthClient is a mock of HealthClient interface.
|
||||
type MockHealthClient struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockHealthClientMockRecorder
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
|
||||
type MockHealthClientMockRecorder struct {
|
||||
mock *MockHealthClient
|
||||
}
|
||||
|
||||
// IsHealthy mocks base method.
|
||||
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "IsHealthy", arg0)
|
||||
ret0, ok := ret[0].(bool)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return ret0
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// IsHealthy indicates an expected call of IsHealthy.
|
||||
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
|
||||
mr.mock.Lock()
|
||||
defer mr.mock.Unlock()
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
|
||||
}
|
||||
|
||||
// NewMockHealthClient creates a new mock instance.
|
||||
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
|
||||
mock := &MockHealthClient{ctrl: ctrl}
|
||||
mock.recorder = &MockHealthClientMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
@@ -50,6 +50,7 @@ go_test(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
|
||||
@@ -30,10 +30,11 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
|
||||
getStatus = "/eth/v1/builder/status"
|
||||
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
|
||||
postRegisterValidatorPath = "/eth/v1/builder/validators"
|
||||
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
|
||||
getStatus = "/eth/v1/builder/status"
|
||||
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
|
||||
postBlindedBeaconBlockV2Path = "/eth/v2/builder/blinded_blocks"
|
||||
postRegisterValidatorPath = "/eth/v1/builder/validators"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -72,7 +73,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
log.WithFields(log.Fields{
|
||||
"bodyBase64": "(nil value)",
|
||||
"url": r.URL.String(),
|
||||
}).Info("builder http request")
|
||||
}).Info("Builder http request")
|
||||
return nil
|
||||
}
|
||||
t := io.TeeReader(r.Body, b)
|
||||
@@ -89,7 +90,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
log.WithFields(log.Fields{
|
||||
"bodyBase64": string(body),
|
||||
"url": r.URL.String(),
|
||||
}).Info("builder http request")
|
||||
}).Info("Builder http request")
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -101,7 +102,8 @@ type BuilderClient interface {
|
||||
NodeURL() string
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
|
||||
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
|
||||
SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
|
||||
Status(ctx context.Context) error
|
||||
}
|
||||
|
||||
@@ -152,7 +154,8 @@ func (c *Client) NodeURL() string {
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
|
||||
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
|
||||
// It validates that the HTTP response status matches the expectedStatus parameter.
|
||||
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.client.do")
|
||||
defer func() {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -187,8 +190,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
|
||||
log.WithError(closeErr).Error("Failed to close response body")
|
||||
}
|
||||
}()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
err = non200Err(r)
|
||||
if r.StatusCode != expectedStatus {
|
||||
err = unexpectedStatusErr(r, expectedStatus)
|
||||
return
|
||||
}
|
||||
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
|
||||
@@ -236,7 +239,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
}
|
||||
data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
|
||||
data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting header from builder server")
|
||||
}
|
||||
@@ -409,7 +412,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
}
|
||||
}
|
||||
|
||||
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
|
||||
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
|
||||
return errors.Wrap(err, "do")
|
||||
}
|
||||
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
|
||||
@@ -446,6 +449,9 @@ func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]
|
||||
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
|
||||
|
||||
func getVersionsBlockToPayload(blockVersion int) (int, error) {
|
||||
if blockVersion >= version.Fulu {
|
||||
return version.Fulu, nil
|
||||
}
|
||||
if blockVersion >= version.Deneb {
|
||||
return version.Deneb, nil
|
||||
}
|
||||
@@ -460,7 +466,7 @@ func getVersionsBlockToPayload(blockVersion int) (int, error) {
|
||||
|
||||
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
|
||||
// The response is the full execution payload used to create the blinded block.
|
||||
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
body, postOpts, err := c.buildBlindedBlockRequest(sb)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -468,7 +474,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
|
||||
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
|
||||
// blobs bundle if it is post deneb.
|
||||
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
|
||||
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
|
||||
}
|
||||
@@ -498,6 +504,24 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
return ed, blobs, nil
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
|
||||
// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
|
||||
func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
body, postOpts, err := c.buildBlindedBlockRequest(sb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Post the blinded block - the response should only contain a status code (no payload)
|
||||
_, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockV2Path, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
|
||||
}
|
||||
|
||||
// Success is indicated by no error (status 202)
|
||||
return nil
|
||||
}
|
||||
|
||||
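Post-Fulu, relays acknowledge a blinded block with 202 Accepted and no payload, so a caller is expected to branch on the block's fork version: earlier blocks still go through SubmitBlindedBlock, which returns the unblinded execution payload and blobs bundle, while Fulu-and-later blocks use SubmitBlindedBlockPostFulu. A hypothetical call-site sketch under that assumption; only the two Submit methods and the BuilderClient interface come from this diff, and the import paths and Version() call on the block are assumed:

```go
// Illustrative call site: pick the builder submission path by fork version.
package example

import (
	"context"

	builder "github.com/OffchainLabs/prysm/v6/api/client/builder"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/runtime/version" // assumed import path
)

func submitToBuilder(ctx context.Context, c builder.BuilderClient, blk interfaces.ReadOnlySignedBeaconBlock) error {
	// Version() is assumed available on the signed block interface.
	if blk.Version() >= version.Fulu {
		// Relay only returns 202 Accepted; no payload comes back on this path.
		return c.SubmitBlindedBlockPostFulu(ctx, blk)
	}
	payload, blobs, err := c.SubmitBlindedBlock(ctx, blk)
	if err != nil {
		return err
	}
	_ = payload // unblinded execution payload for the pre-Fulu flow
	_ = blobs   // blobs bundle (post-Deneb)
	return nil
}
```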
func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
|
||||
var versionHeader string
|
||||
if c.sszEnabled {
|
||||
@@ -558,7 +582,7 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
|
||||
func (c *Client) parseBlindedBlockResponse(
|
||||
respBytes []byte,
|
||||
forkVersion int,
|
||||
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
if c.sszEnabled {
|
||||
return c.parseBlindedBlockResponseSSZ(respBytes, forkVersion)
|
||||
}
|
||||
@@ -568,8 +592,18 @@ func (c *Client) parseBlindedBlockResponse(
|
||||
func (c *Client) parseBlindedBlockResponseSSZ(
|
||||
respBytes []byte,
|
||||
forkVersion int,
|
||||
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
if forkVersion >= version.Deneb {
|
||||
) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
if forkVersion >= version.Fulu {
|
||||
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundleV2{}
|
||||
if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundleV2 SSZ")
|
||||
}
|
||||
ed, err := blocks.NewWrappedExecutionData(payloadAndBlobs.Payload)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
|
||||
}
|
||||
return ed, payloadAndBlobs.BlobsBundle, nil
|
||||
} else if forkVersion >= version.Deneb {
|
||||
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{}
|
||||
if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundle SSZ")
|
||||
@@ -644,11 +678,11 @@ func (c *Client) Status(ctx context.Context) error {
|
||||
getOpts := func(r *http.Request) {
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
|
||||
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
|
||||
return err
|
||||
}
|
||||
|
||||
func non200Err(response *http.Response) error {
|
||||
func unexpectedStatusErr(response *http.Response, expected int) error {
|
||||
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
|
||||
var errMessage ErrorMessage
|
||||
var body string
|
||||
@@ -657,7 +691,7 @@ func non200Err(response *http.Response) error {
|
||||
} else {
|
||||
body = "response body:\n" + string(bodyBytes)
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case http.StatusUnsupportedMediaType:
|
||||
log.WithError(ErrUnsupportedMediaType).Debug(msg)
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
@@ -31,7 +32,7 @@ func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
func TestClient_Status(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
statusPath := "/eth/v1/builder/status"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -84,7 +85,7 @@ func TestClient_Status(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClient_RegisterValidator(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
|
||||
expectedPath := "/eth/v1/builder/validators"
|
||||
t.Run("JSON success", func(t *testing.T) {
|
||||
@@ -168,7 +169,7 @@ func TestClient_RegisterValidator(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClient_GetHeader(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
var slot primitives.Slot = 23
|
||||
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
@@ -601,7 +602,7 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("bellatrix", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -1554,12 +1555,95 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlockPostFulu(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
|
||||
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
// Post-Fulu: only return status code, no payload
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
Body: io.NopCloser(bytes.NewBufferString("")),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("success_ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
|
||||
require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
// Post-Fulu: only return status code, no payload
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
Body: io.NopCloser(bytes.NewBufferString("")),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("error_response", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
|
||||
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
|
||||
message := ErrorMessage{
|
||||
Code: 400,
|
||||
Message: "Bad Request",
|
||||
}
|
||||
resp, err := json.Marshal(message)
|
||||
require.NoError(t, err)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Body: io.NopCloser(bytes.NewBuffer(resp)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
|
||||
require.ErrorIs(t, err, ErrNotOK)
|
||||
})
|
||||
}

func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)
require.NoError(t, err)

ctx := context.Background()
ctx := t.Context()
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, getStatus, r.URL.Path)
@@ -1574,3 +1658,166 @@ func TestRequestLogger(t *testing.T) {
err = c.Status(ctx)
require.NoError(t, err)
}

func TestGetVersionsBlockToPayload(t *testing.T) {
tests := []struct {
name string
blockVersion int
expectedVersion int
expectedError bool
}{
{
name: "Fulu version",
blockVersion: 6, // version.Fulu
expectedVersion: 6,
expectedError: false,
},
{
name: "Deneb version",
blockVersion: 4, // version.Deneb
expectedVersion: 4,
expectedError: false,
},
{
name: "Capella version",
blockVersion: 3, // version.Capella
expectedVersion: 3,
expectedError: false,
},
{
name: "Bellatrix version",
blockVersion: 2, // version.Bellatrix
expectedVersion: 2,
expectedError: false,
},
{
name: "Unsupported version",
blockVersion: 0,
expectedError: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version, err := getVersionsBlockToPayload(tt.blockVersion)
if tt.expectedError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tt.expectedVersion, version)
}
})
}
}
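
The table above pins down the expected behaviour of getVersionsBlockToPayload: Bellatrix through Fulu map one-to-one to a payload version, and anything earlier is rejected. The real implementation is not part of this diff, so the function below is only a hedged sketch of the mapping the test exercises, with an illustrative name.

// Illustrative only: mirrors the expectations of the table-driven test above.
func getVersionsBlockToPayloadSketch(blockVersion int) (int, error) {
	if blockVersion < version.Bellatrix {
		return 0, fmt.Errorf("unsupported block version %d", blockVersion)
	}
	// In this sketch, Bellatrix and later forks keep the same payload version.
	return blockVersion, nil
}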

func TestParseBlindedBlockResponseSSZ_WithBlobsBundleV2(t *testing.T) {
c := &Client{sszEnabled: true}

// Create test payload
payload := &v1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BlockNumber: 123456,
GasLimit: 30000000,
GasUsed: 21000,
Timestamp: 1234567890,
ExtraData: []byte("test-extra-data"),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: [][]byte{},
Withdrawals: []*v1.Withdrawal{},
BlobGasUsed: 1024,
ExcessBlobGas: 2048,
}

// Create test BlobsBundleV2
bundleV2 := &v1.BlobsBundleV2{
KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
Proofs: [][]byte{make([]byte, 48), make([]byte, 48)},
Blobs: [][]byte{make([]byte, 131072), make([]byte, 131072)},
}

// Test Fulu version (should use ExecutionPayloadDenebAndBlobsBundleV2)
t.Run("Fulu version with BlobsBundleV2", func(t *testing.T) {
payloadAndBlobsV2 := &v1.ExecutionPayloadDenebAndBlobsBundleV2{
Payload: payload,
BlobsBundle: bundleV2,
}

respBytes, err := payloadAndBlobsV2.MarshalSSZ()
require.NoError(t, err)

ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 6) // version.Fulu
require.NoError(t, err)
require.NotNil(t, ed)
require.NotNil(t, bundle)

// Verify the bundle is BlobsBundleV2
bundleV2Result, ok := bundle.(*v1.BlobsBundleV2)
assert.Equal(t, true, ok, "Expected BlobsBundleV2 type")
require.Equal(t, len(bundleV2.KzgCommitments), len(bundleV2Result.KzgCommitments))
require.Equal(t, len(bundleV2.Proofs), len(bundleV2Result.Proofs))
require.Equal(t, len(bundleV2.Blobs), len(bundleV2Result.Blobs))
})

// Test Deneb version (should use regular BlobsBundle)
t.Run("Deneb version with regular BlobsBundle", func(t *testing.T) {
regularBundle := &v1.BlobsBundle{
KzgCommitments: bundleV2.KzgCommitments,
Proofs: bundleV2.Proofs,
Blobs: bundleV2.Blobs,
}

payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{
Payload: payload,
BlobsBundle: regularBundle,
}

respBytes, err := payloadAndBlobs.MarshalSSZ()
require.NoError(t, err)

ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 4) // version.Deneb
require.NoError(t, err)
require.NotNil(t, ed)
require.NotNil(t, bundle)

// Verify the bundle is regular BlobsBundle
regularBundleResult, ok := bundle.(*v1.BlobsBundle)
assert.Equal(t, true, ok, "Expected BlobsBundle type")
require.Equal(t, len(regularBundle.KzgCommitments), len(regularBundleResult.KzgCommitments))
})

// Test invalid SSZ data
t.Run("Invalid SSZ data", func(t *testing.T) {
invalidBytes := []byte("invalid-ssz-data")

ed, bundle, err := c.parseBlindedBlockResponseSSZ(invalidBytes, 6)
assert.NotNil(t, err)
assert.Equal(t, true, ed == nil)
assert.Equal(t, true, bundle == nil)
})
}

func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
// Note: The full integration test is complex due to version detection logic
// The key functionality is tested in the parseBlindedBlockResponseSSZ tests above
// and in the mock service tests which verify the interface changes work correctly

t.Run("Interface signature verification", func(t *testing.T) {
// This test verifies that the SubmitBlindedBlock method signature
// has been updated to return BlobsBundler interface

client := &Client{}

// Verify the method exists with the correct signature
// by using reflection or by checking it compiles with the interface
var _ func(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) = client.SubmitBlindedBlock

// This test passes if the signature is correct
assert.Equal(t, true, true)
})
}

@@ -41,10 +41,15 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}

// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
return nil, nil, nil
}

// SubmitBlindedBlockPostFulu --
func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
return nil
}

// Status --
func (MockClient) Status(_ context.Context) error {
return nil

@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
|
||||
require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
|
||||
}
|
||||
|
||||
func TestErrorMessage_non200Err(t *testing.T) {
|
||||
func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
|
||||
mockRequest := &http.Request{
|
||||
URL: &url.URL{Path: "example.com"},
|
||||
}
|
||||
@@ -1779,7 +1779,7 @@ func TestErrorMessage_non200Err(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := non200Err(tt.args)
|
||||
err := unexpectedStatusErr(tt.args, http.StatusOK)
|
||||
if err != nil && tt.wantMessage != "" {
|
||||
require.ErrorContains(t, tt.wantMessage, err)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -30,7 +29,7 @@ func TestNewEventStream(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
|
||||
_, err := NewEventStream(t.Context(), &http.Client{}, tt.host, tt.topics)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
@@ -56,7 +55,7 @@ func TestEventStream(t *testing.T) {
|
||||
|
||||
topics := []string{"head"}
|
||||
eventsChannel := make(chan *Event, 1)
|
||||
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
|
||||
stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, topics)
|
||||
require.NoError(t, err)
|
||||
go stream.Subscribe(eventsChannel)
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -16,7 +15,7 @@ type customErrorData struct {
|
||||
|
||||
func TestAppendHeaders(t *testing.T) {
|
||||
t.Run("one_header", func(t *testing.T) {
|
||||
ctx := AppendHeaders(context.Background(), []string{"first=value1"})
|
||||
ctx := AppendHeaders(t.Context(), []string{"first=value1"})
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
require.Equal(t, true, ok, "Failed to read context metadata")
|
||||
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
|
||||
@@ -24,7 +23,7 @@ func TestAppendHeaders(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("multiple_headers", func(t *testing.T) {
|
||||
ctx := AppendHeaders(context.Background(), []string{"first=value1", "second=value2"})
|
||||
ctx := AppendHeaders(t.Context(), []string{"first=value1", "second=value2"})
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
require.Equal(t, true, ok, "Failed to read context metadata")
|
||||
require.Equal(t, 2, md.Len(), "MetadataV0 contains wrong number of values")
|
||||
@@ -33,7 +32,7 @@ func TestAppendHeaders(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("one_empty_header", func(t *testing.T) {
|
||||
ctx := AppendHeaders(context.Background(), []string{"first=value1", ""})
|
||||
ctx := AppendHeaders(t.Context(), []string{"first=value1", ""})
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
require.Equal(t, true, ok, "Failed to read context metadata")
|
||||
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
|
||||
@@ -42,7 +41,7 @@ func TestAppendHeaders(t *testing.T) {
|
||||
|
||||
t.Run("incorrect_header", func(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
ctx := AppendHeaders(context.Background(), []string{"first=value1", "second"})
|
||||
ctx := AppendHeaders(t.Context(), []string{"first=value1", "second"})
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
require.Equal(t, true, ok, "Failed to read context metadata")
|
||||
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
|
||||
@@ -51,7 +50,7 @@ func TestAppendHeaders(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("header_value_with_equal_sign", func(t *testing.T) {
|
||||
ctx := AppendHeaders(context.Background(), []string{"first=value=1"})
|
||||
ctx := AppendHeaders(t.Context(), []string{"first=value=1"})
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
require.Equal(t, true, ok, "Failed to read context metadata")
|
||||
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
package httprest
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/middleware"
|
||||
)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package httprest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
@@ -34,7 +33,7 @@ func TestServer_StartStop(t *testing.T) {
|
||||
WithRouter(handler),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
g, err := New(t.Context(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
g.Start()
|
||||
@@ -62,7 +61,7 @@ func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
|
||||
WithRouter(handler),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
g, err := New(t.Context(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
@@ -8,7 +8,12 @@ go_library(
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@com_github_rs_cors//:go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/apiutil:go_default_library",
|
||||
"@com_github_rs_cors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
@@ -22,5 +27,6 @@ go_test(
|
||||
"//api:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,11 +1,15 @@
package middleware

import (
"compress/gzip"
"fmt"
"net/http"
"strings"

"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/apiutil"
"github.com/rs/cors"
log "github.com/sirupsen/logrus"
)

type Middleware func(http.Handler) http.Handler
@@ -71,47 +75,64 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
acceptHeader := r.Header.Get("Accept")
// header is optional and should skip if not provided
if acceptHeader == "" {
if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
})
}
}

// AcceptEncodingHeaderHandler compresses the response before sending it back to the client, if gzip is supported.
func AcceptEncodingHeaderHandler() Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
next.ServeHTTP(w, r)
return
}

accepted := false
acceptTypes := strings.Split(acceptHeader, ",")
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
for _, acceptType := range acceptTypes {
acceptType = strings.TrimSpace(acceptType)
if acceptType == "*/*" {
accepted = true
break
gz := gzip.NewWriter(w)
gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
defer func() {
if !gzipRW.zip {
return
}
for _, serverAcceptedType := range serverAcceptedTypes {
if strings.HasPrefix(acceptType, serverAcceptedType) {
accepted = true
break
}
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
accepted = true
break
}
if err := gz.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip writer")
}
if accepted {
break
}
}
}()

if !accepted {
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
return
}

next.ServeHTTP(w, r)
next.ServeHTTP(gzipRW, r)
})
}
}

type gzipResponseWriter struct {
gz *gzip.Writer
http.ResponseWriter
zip bool
}

func (g *gzipResponseWriter) WriteHeader(statusCode int) {
if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
// Removing the current Content-Length because zipping will change it.
g.Header().Del("Content-Length")
g.Header().Set("Content-Encoding", "gzip")
g.zip = true
}

g.ResponseWriter.WriteHeader(statusCode)
}

func (g *gzipResponseWriter) Write(b []byte) (int, error) {
if g.zip {
return g.gz.Write(b)
}
return g.ResponseWriter.Write(b)
}

func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
if len(mw) < 1 {
return h

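The rewritten AcceptHeaderHandler delegates media-type matching to apiutil.Negotiate, and the new AcceptEncodingHeaderHandler wraps the ResponseWriter so JSON responses are gzipped only when the client advertises gzip support and the response Content-Type is JSON. A minimal wiring sketch using MiddlewareChain is below; the route, listen address, and accepted media-type list are assumptions for the example, not values from this change.

// Illustrative wiring of the middleware shown above; only the middleware
// constructors and MiddlewareChain come from this diff.
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/node/health", func(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
})
accepted := []string{api.JsonMediaType, api.OctetStreamMediaType}
handler := MiddlewareChain(mux, []Middleware{
	ContentTypeHandler(accepted),  // reject unsupported request bodies
	AcceptHeaderHandler(accepted), // 406 when the Accept header cannot be negotiated
	AcceptEncodingHeaderHandler(), // gzip JSON responses when the client asks for it
})
_ = http.ListenAndServe("127.0.0.1:8080", handler)
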
@@ -1,14 +1,35 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// frozenHeaderRecorder allows asserting that response headers were not modified
|
||||
// after the call to WriteHeader.
|
||||
//
|
||||
// Its purpose is to have a regression test for https://github.com/OffchainLabs/prysm/pull/15499.
|
||||
type frozenHeaderRecorder struct {
|
||||
*httptest.ResponseRecorder
|
||||
frozenHeader http.Header
|
||||
}
|
||||
|
||||
func (r *frozenHeaderRecorder) WriteHeader(code int) {
|
||||
if r.frozenHeader != nil {
|
||||
return
|
||||
}
|
||||
r.ResponseRecorder.WriteHeader(code)
|
||||
r.frozenHeader = r.ResponseRecorder.Header().Clone()
|
||||
}
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
@@ -124,6 +145,90 @@ func TestContentTypeHandler(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAcceptEncodingHeaderHandler(t *testing.T) {
|
||||
dummyContent := "Test gzip middleware content"
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", r.Header.Get("Accept"))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err := w.Write([]byte(dummyContent))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := AcceptEncodingHeaderHandler()(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
accept string
|
||||
acceptEncoding string
|
||||
expectCompressed bool
|
||||
}{
|
||||
{
|
||||
name: "Accept gzip",
|
||||
accept: api.JsonMediaType,
|
||||
acceptEncoding: "gzip",
|
||||
expectCompressed: true,
|
||||
},
|
||||
{
|
||||
name: "Accept multiple encodings",
|
||||
accept: api.JsonMediaType,
|
||||
acceptEncoding: "deflate, gzip",
|
||||
expectCompressed: true,
|
||||
},
|
||||
{
|
||||
name: "Accept unsupported encoding",
|
||||
accept: api.JsonMediaType,
|
||||
acceptEncoding: "deflate",
|
||||
expectCompressed: false,
|
||||
},
|
||||
{
|
||||
name: "No accept encoding header",
|
||||
accept: api.JsonMediaType,
|
||||
acceptEncoding: "",
|
||||
expectCompressed: false,
|
||||
},
|
||||
{
|
||||
name: "SSZ",
|
||||
accept: api.OctetStreamMediaType,
|
||||
acceptEncoding: "gzip",
|
||||
expectCompressed: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/", nil)
|
||||
req.Header.Set("Accept", tt.accept)
|
||||
if tt.acceptEncoding != "" {
|
||||
req.Header.Set("Accept-Encoding", tt.acceptEncoding)
|
||||
}
|
||||
rr := &frozenHeaderRecorder{ResponseRecorder: httptest.NewRecorder()}
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if tt.expectCompressed {
|
||||
require.Equal(t, "gzip", rr.frozenHeader.Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")
|
||||
|
||||
compressedBody := rr.Body.Bytes()
|
||||
require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")
|
||||
|
||||
gzReader, err := gzip.NewReader(bytes.NewReader(compressedBody))
|
||||
require.NoError(t, err, "Failed to create gzipReader")
|
||||
defer func() {
|
||||
if err := gzReader.Close(); err != nil {
|
||||
log.WithError(err).Error("Failed to close gzip reader")
|
||||
}
|
||||
}()
|
||||
|
||||
decompressedBody, err := io.ReadAll(gzReader)
|
||||
require.NoError(t, err, "Failed to decompress response body")
|
||||
require.Equal(t, dummyContent, string(decompressedBody), "Decompressed content should match the original")
|
||||
} else {
|
||||
require.Equal(t, dummyContent, rr.Body.String(), "Response body should be uncompressed and match the original")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAcceptHeaderHandler(t *testing.T) {
|
||||
acceptedTypes := []string{"application/json", "application/octet-stream"}
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
|
||||
"github.com/OffchainLabs/prysm/v6/container/slice"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/math"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
@@ -699,6 +700,11 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
// Add validation to check if the signature is valid BLS format
|
||||
_, err = bls.SignatureFromBytes(sig)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
|
||||
return ð.SyncCommitteeMessage{
|
||||
Slot: primitives.Slot(slot),
|
||||
|
||||
@@ -74,7 +74,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
|
||||
}
|
||||
|
||||
return &BeaconState{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -177,7 +177,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
|
||||
}
|
||||
|
||||
return &BeaconStateAltair{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -295,7 +295,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
|
||||
}
|
||||
|
||||
return &BeaconStateBellatrix{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -430,7 +430,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
|
||||
}
|
||||
|
||||
return &BeaconStateCapella{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -568,7 +568,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
|
||||
}
|
||||
|
||||
return &BeaconStateDeneb{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -742,7 +742,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
|
||||
}
|
||||
|
||||
return &BeaconStateElectra{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -923,9 +923,16 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
srcLookahead, err := st.ProposerLookahead()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lookahead := make([]string, len(srcLookahead))
|
||||
for i, v := range srcLookahead {
|
||||
lookahead[i] = fmt.Sprintf("%d", uint64(v))
|
||||
}
|
||||
return &BeaconStateFulu{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
@@ -962,5 +969,6 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
ProposerLookahead: lookahead,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -283,3 +283,16 @@ type GetPendingPartialWithdrawalsResponse struct {
|
||||
Finalized bool `json:"finalized"`
|
||||
Data []*PendingPartialWithdrawal `json:"data"`
|
||||
}
|
||||
|
||||
type GetProposerLookaheadResponse struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data []string `json:"data"` // validator indexes
|
||||
}
|
||||
|
||||
type GetBlobsResponse struct {
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data []string `json:"data"` //blobs
|
||||
}
|
||||
|
||||
@@ -56,3 +56,19 @@ type ForkChoiceNodeExtraData struct {
|
||||
TimeStamp string `json:"timestamp"`
|
||||
Target string `json:"target"`
|
||||
}
|
||||
|
||||
type GetDebugDataColumnSidecarsResponse struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data []*DataColumnSidecar `json:"data"`
|
||||
}
|
||||
|
||||
type DataColumnSidecar struct {
|
||||
Index string `json:"index"`
|
||||
Column []string `json:"column"`
|
||||
KzgCommitments []string `json:"kzg_commitments"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
SignedBeaconBlockHeader *SignedBeaconBlockHeader `json:"signed_block_header"`
|
||||
KzgCommitmentsInclusionProof []string `json:"kzg_commitments_inclusion_proof"`
|
||||
}
|
||||
|
||||
@@ -27,6 +27,8 @@ type Identity struct {
|
||||
type Metadata struct {
|
||||
SeqNumber string `json:"seq_number"`
|
||||
Attnets string `json:"attnets"`
|
||||
Syncnets string `json:"syncnets,omitempty"`
|
||||
Cgc string `json:"custody_group_count,omitempty"`
|
||||
}
|
||||
|
||||
type GetPeerResponse struct {
|
||||
|
||||
@@ -219,4 +219,5 @@ type BeaconStateFulu struct {
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
func TestDebounce_NoEvents(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
wg := &sync.WaitGroup{}
|
||||
@@ -39,7 +39,7 @@ func TestDebounce_NoEvents(t *testing.T) {
|
||||
|
||||
func TestDebounce_CtxClosing(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
wg := &sync.WaitGroup{}
|
||||
@@ -75,7 +75,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
|
||||
|
||||
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
@@ -93,7 +93,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
|
||||
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
|
||||
@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.WithField("function", funcName).Trace("running")
|
||||
log.WithField("function", funcName).Trace("Running")
|
||||
f()
|
||||
case <-ctx.Done():
|
||||
log.WithField("function", funcName).Debug("context is closed, exiting")
|
||||
log.WithField("function", funcName).Debug("Context is closed, exiting")
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestEveryRuns(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
|
||||
i := int32(0)
|
||||
async.RunEvery(ctx, 100*time.Millisecond, func() {
|
||||
|
||||
@@ -27,7 +27,7 @@ go_library(
|
||||
"receive_block.go",
|
||||
"receive_data_column.go",
|
||||
"service.go",
|
||||
"setup_forchoice.go",
|
||||
"setup_forkchoice.go",
|
||||
"tracked_proposer.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
],
|
||||
@@ -50,7 +50,6 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
@@ -63,6 +62,7 @@ go_library(
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
@@ -73,6 +73,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -147,7 +148,6 @@ go_test(
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -160,6 +160,7 @@ go_test(
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/attestations/kv:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
@@ -181,6 +182,7 @@ go_test(
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -194,6 +196,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -41,7 +41,7 @@ type ForkchoiceFetcher interface {
|
||||
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
|
||||
CachedHeadRoot() [32]byte
|
||||
GetProposerHead() [32]byte
|
||||
SetForkChoiceGenesisTime(uint64)
|
||||
SetForkChoiceGenesisTime(time.Time)
|
||||
UpdateHead(context.Context, primitives.Slot)
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
@@ -514,7 +514,7 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
|
||||
|
||||
// SetGenesisTime sets the genesis time of beacon chain.
|
||||
func (s *Service) SetGenesisTime(t time.Time) {
|
||||
s.genesisTime = t
|
||||
s.genesisTime = t.Truncate(time.Second) // Genesis time has a precision of 1 second.
|
||||
}
|
||||
|
||||
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
consensus_blocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -27,7 +28,7 @@ func (s *Service) GetProposerHead() [32]byte {
|
||||
}
|
||||
|
||||
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
|
||||
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
|
||||
func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
|
||||
|
||||
@@ -1,12 +1,8 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -15,82 +11,71 @@ import (
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
s := testServiceWithDB(t)
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
|
||||
}()
|
||||
s.HeadSlot()
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
s := testServiceWithDB(t)
|
||||
s.head = &head{root: [32]byte{'A'}}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
wait := make(chan struct{})
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
|
||||
|
||||
}()
|
||||
_, err = s.HeadRoot(context.Background())
|
||||
_, err = s.HeadRoot(t.Context())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
head: &head{block: wsb},
|
||||
}
|
||||
s := testServiceWithDB(t)
|
||||
s.head = &head{block: wsb}
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
wait := make(chan struct{})
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
|
||||
|
||||
}()
|
||||
_, err = s.HeadBlock(context.Background())
|
||||
_, err = s.HeadBlock(t.Context())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
|
||||
}
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := s.cfg.BeaconDB
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
wait := make(chan struct{})
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
|
||||
require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(t.Context(), root))
|
||||
require.NoError(t, beaconDB.SaveState(t.Context(), st, root))
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
|
||||
|
||||
}()
|
||||
_, err = s.HeadState(context.Background())
|
||||
_, err = s.HeadState(t.Context())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"time"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
@@ -15,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -84,7 +84,7 @@ func prepareForkchoiceState(
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
headRoot, err := c.HeadRoot(context.Background())
|
||||
headRoot, err := c.HeadRoot(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
|
||||
}
|
||||
@@ -137,8 +137,8 @@ func TestFinalizedBlockHash(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ctx := t.Context()
|
||||
service := testServiceWithDB(t)
|
||||
ojc := ðpb.Checkpoint{Root: []byte{'j'}}
|
||||
ofc := ðpb.Checkpoint{Root: []byte{'f'}}
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
@@ -153,7 +153,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
@@ -200,10 +200,10 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = &head{block: wsb, state: s}
|
||||
|
||||
received, err := c.HeadBlock(context.Background())
|
||||
received, err := c.HeadBlock(t.Context())
|
||||
require.NoError(t, err)
|
||||
pb, err := received.Proto()
|
||||
require.NoError(t, err)
|
||||
@@ -213,15 +213,16 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
func TestHeadState_CanRetrieve(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = &head{state: s}
|
||||
headState, err := c.HeadState(context.Background())
|
||||
headState, err := c.HeadState(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
|
||||
}
|
||||
|
||||
func TestGenesisTime_CanRetrieve(t *testing.T) {
|
||||
c := &Service{genesisTime: time.Unix(999, 0)}
|
||||
c := testServiceNoDB(t)
|
||||
c.genesisTime = time.Unix(999, 0)
|
||||
wanted := time.Unix(999, 0)
|
||||
assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
|
||||
}
|
||||
@@ -230,7 +231,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
f := ðpb.Fork{Epoch: 999}
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Fork: f})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = &head{state: s}
|
||||
if !proto.Equal(c.CurrentFork(), f) {
|
||||
t.Error("Received incorrect fork version")
|
||||
@@ -242,7 +243,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
if !proto.Equal(c.CurrentFork(), f) {
|
||||
t.Error("Received incorrect fork version")
|
||||
}
|
||||
@@ -250,7 +251,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
|
||||
|
||||
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
|
||||
// Should not panic if head state is nil.
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
|
||||
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
@@ -269,7 +270,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
|
||||
d := ðpb.Eth1Data{DepositCount: 999}
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Eth1Data: d})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = &head{state: s}
|
||||
if !proto.Equal(c.HeadETH1Data(), d) {
|
||||
t.Error("Received incorrect eth1 data")
|
||||
@@ -277,7 +278,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsCanonical_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
|
||||
@@ -298,22 +299,22 @@ func TestIsCanonical_Ok(t *testing.T) {
|
||||
|
||||
func TestService_HeadValidatorsIndices(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
|
||||
c.head = &head{}
|
||||
indices, err := c.HeadValidatorsIndices(context.Background(), 0)
|
||||
indices, err := c.HeadValidatorsIndices(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(indices))
|
||||
|
||||
c.head = &head{state: s}
|
||||
indices, err = c.HeadValidatorsIndices(context.Background(), 0)
|
||||
indices, err = c.HeadValidatorsIndices(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(indices))
|
||||
}
|
||||
|
||||
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
|
||||
c.head = &head{}
|
||||
root := c.HeadGenesisValidatorsRoot()
|
||||
@@ -331,8 +332,8 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
|
||||
// ---------- D
|
||||
|
||||
func TestService_ChainHeads(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ctx := t.Context()
|
||||
c := testServiceWithDB(t)
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
@@ -366,7 +367,7 @@ func TestService_ChainHeads(t *testing.T) {
|
||||
|
||||
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
|
||||
@@ -381,7 +382,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = nil
|
||||
|
||||
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
|
||||
@@ -396,10 +397,10 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
|
||||
|
||||
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
@@ -409,15 +410,15 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
|
||||
c := &Service{}
|
||||
c := testServiceNoDB(t)
|
||||
c.head = nil
|
||||
|
||||
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
|
||||
|
||||
c.head = &head{state: nil}
|
||||
p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
p, err = c.HeadValidatorIndexToPublicKey(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
|
||||
}
|
||||
@@ -428,10 +429,12 @@ func TestService_IsOptimistic(t *testing.T) {
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
c := testServiceWithDB(t)
|
||||
c.SetGenesisTime(time.Now())
|
||||
c.head = &head{root: [32]byte{'b'}}
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
@@ -450,22 +453,24 @@ func TestService_IsOptimistic(t *testing.T) {
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
// If head is nil, for some reason, an error should be returned rather than panic.
|
||||
c = &Service{}
|
||||
c = testServiceNoDB(t)
|
||||
_, err = c.IsOptimistic(ctx)
|
||||
require.ErrorIs(t, err, ErrNilHead)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{genesisTime: time.Now()}
|
||||
ctx := t.Context()
|
||||
c := testServiceNoDB(t)
|
||||
c.genesisTime = time.Now()
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ctx := t.Context()
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{root: [32]byte{'b'}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
@@ -481,28 +486,29 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ctx := t.Context()
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{root: [32]byte{'b'}}
|
||||
beaconDB := c.cfg.BeaconDB
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
util.SaveBlock(t, t.Context(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 97
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
|
||||
util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock)
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
|
||||
util.SaveBlock(t, t.Context(), beaconDB, validatedBlock)
|
||||
|
||||
validatedCheckpoint := ðpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
@@ -524,48 +530,48 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
|
||||
// Before the first finalized epoch, finalized root could be zeros.
|
||||
validatedCheckpoint = ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
|
||||
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ctx := t.Context()
|
||||
c := testServiceWithDB(t)
|
||||
beaconDB := c.cfg.BeaconDB
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
util.SaveBlock(t, t.Context(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 97
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
|
||||
util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock)
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
|
||||
util.SaveBlock(t, t.Context(), beaconDB, validatedBlock)
|
||||
|
||||
validatedCheckpoint := ðpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
|
||||
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
|
||||
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, validated)
|
||||
@@ -573,15 +579,15 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ctx := t.Context()
|
||||
c := testServiceWithDB(t)
|
||||
beaconDB := c.cfg.BeaconDB
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
util.SaveBlock(t, t.Context(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
|
||||
_, err = c.IsOptimisticForRoot(ctx, br)
|
||||
assert.NoError(t, err)
|
||||
@@ -593,9 +599,9 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_IsFinalized(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ctx := t.Context()
|
||||
c := testServiceWithDB(t)
|
||||
beaconDB := c.cfg.BeaconDB
|
||||
r1 := [32]byte{'a'}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
|
||||
Root: r1,
|
||||
@@ -616,9 +622,10 @@ func TestService_IsFinalized(t *testing.T) {
|
||||
|
||||
func Test_hashForGenesisRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 10)
|
||||
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
|
||||
require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
|
||||
root, err := beaconDB.GenesisBlockRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -7,10 +7,15 @@ type currentlySyncingBlock struct {
roots map[[32]byte]struct{}
}

func (b *currentlySyncingBlock) set(root [32]byte) {
func (b *currentlySyncingBlock) set(root [32]byte) error {
b.Lock()
defer b.Unlock()
_, ok := b.roots[root]
if ok {
return errBlockBeingSynced
}
b.roots[root] = struct{}{}
return nil
}
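
// Editor's note: a minimal usage sketch for the error-returning set above. The receiver
// field name blockBeingSynced and the processOnce helper are assumptions for illustration,
// not the actual call sites touched by this change.
func (s *Service) processOnce(ctx context.Context, root [32]byte, process func(context.Context) error) error {
	if err := s.blockBeingSynced.set(root); err != nil {
		// errBlockBeingSynced: another goroutine is already importing this root.
		return err
	}
	defer s.blockBeingSynced.unset(root)
	return process(ctx)
}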

func (b *currentlySyncingBlock) unset(root [32]byte) {

@@ -2,7 +2,6 @@ package blockchain
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@@ -17,9 +16,6 @@ var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
// a higher number of fragmented indexes are reallocated to a new separate slice for
|
||||
// that field.
|
||||
func (s *Service) defragmentState(st state.BeaconState) {
|
||||
if !features.Get().EnableExperimentalState {
|
||||
return
|
||||
}
|
||||
startTime := time.Now()
|
||||
st.Defragment()
|
||||
elapsedTime := time.Since(startTime)
|
||||
|
||||
@@ -44,6 +44,8 @@ var (
errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
// errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
// errBlockBeingSynced is returned when a block is being synced.
errBlockBeingSynced = errors.New("block is being synced")
)

// An invalid block is the block that fails state transition based on the core protocol rules.

@@ -16,7 +16,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, r, b, st); err != nil {
|
||||
log.WithError(err).Error("could not save head after pruning invalid blocks")
|
||||
log.WithError(err).Error("Could not save head after pruning invalid blocks")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -174,6 +174,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
|
||||
}).Info("Forkchoice updated with payload attributes for proposal")
|
||||
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
|
||||
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
|
||||
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
|
||||
@@ -217,24 +218,18 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
|
||||
|
||||
// notifyNewPayload signals execution engine on a new payload.
|
||||
// It returns true if the EL has returned VALID for the block
|
||||
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
|
||||
// stVersion should represent the version of the pre-state; header should also be from the pre-state.
|
||||
func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
|
||||
defer span.End()
|
||||
|
||||
// Execution payload is only supported in Bellatrix and beyond. Pre
|
||||
// merge blocks are never optimistic
|
||||
if blk == nil {
|
||||
return false, errors.New("signed beacon block can't be nil")
|
||||
}
|
||||
if preStateVersion < version.Bellatrix {
|
||||
if stVersion < version.Bellatrix {
|
||||
return true, nil
|
||||
}
|
||||
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
|
||||
return false, err
|
||||
}
|
||||
body := blk.Block().Body()
|
||||
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
|
||||
enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
|
||||
}
|
||||
@@ -267,28 +262,32 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
return false, errors.New("nil execution requests")
|
||||
}
|
||||
}
|
||||
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
|
||||
if err == nil {
|
||||
newPayloadValidNodeCount.Inc()
|
||||
return true, nil
|
||||
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
|
||||
}
|
||||
logFields := logrus.Fields{
|
||||
"slot": blk.Block().Slot(),
|
||||
"parentRoot": fmt.Sprintf("%#x", parentRoot),
|
||||
"root": fmt.Sprintf("%#x", blk.Root()),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}
|
||||
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
|
||||
newPayloadOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blk.Block().Slot(),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
log.WithFields(logFields).Info("Called new payload with optimistic block")
|
||||
return false, nil
|
||||
case errors.Is(err, execution.ErrInvalidPayloadStatus):
|
||||
lvh := bytesutil.ToBytes32(lastValidHash)
|
||||
}
|
||||
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
|
||||
log.WithFields(logFields).WithError(err).Error("Invalid payload status")
|
||||
return false, invalidBlock{
|
||||
error: ErrInvalidPayload,
|
||||
lastValidHash: lvh,
|
||||
lastValidHash: bytesutil.ToBytes32(lastValidHash),
|
||||
}
|
||||
default:
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
|
||||
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
@@ -354,7 +353,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
}
|
||||
|
||||
// Get timestamp.
|
||||
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
|
||||
t, err := slots.StartTime(s.genesisTime, slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timestamp to get payload attribute")
|
||||
return emptyAttri
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
|
||||
block: wba,
|
||||
}
|
||||
|
||||
genesis.StoreStateDuringTest(t, st)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
|
||||
a := &fcuConfig{
|
||||
@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
genesis.StoreStateDuringTest(t, bState)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
|
||||
@@ -478,33 +481,12 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
phase0State, _ := util.DeterministicGenesisState(t, 1)
|
||||
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
|
||||
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
|
||||
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
|
||||
},
|
||||
}
|
||||
a := util.NewBeaconBlockAltair()
|
||||
altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
|
||||
require.NoError(t, err)
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
BlockNumber: 1,
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 1
|
||||
blk.Block.Body.ExecutionPayload.BlockNumber = 1
|
||||
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
@@ -541,12 +523,6 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
blk: altairBlk,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "nil beacon block",
|
||||
postState: bellatrixState,
|
||||
errString: "signed beacon block can't be nil",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "new payload with optimistic block",
|
||||
postState: bellatrixState,
|
||||
@@ -573,15 +549,8 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
name: "altair pre state, happy case",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
b, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
@@ -592,24 +561,7 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
name: "not at merge transition",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
b, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
@@ -620,15 +572,8 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
name: "happy case",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
b, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
@@ -639,15 +584,8 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
name: "undefined error from ee",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
b, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
@@ -659,15 +597,8 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
name: "invalid block hash error from ee",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
b, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
@@ -698,7 +629,9 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
|
||||
require.NoError(t, err)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
|
||||
rob, err := consensusblocks.NewROBlock(tt.blk)
|
||||
require.NoError(t, err)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
if tt.invalidBlock {
|
||||
@@ -722,17 +655,12 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
ctx := tr.ctx
|
||||
|
||||
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
rob, err := consensusblocks.NewROBlock(bellatrixBlk)
|
||||
require.NoError(t, err)
|
||||
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
@@ -749,7 +677,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
|
||||
require.NoError(t, err)
|
||||
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
|
||||
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, validated)
|
||||
}
|
||||
|
||||
@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("could not compute payload attributes")
|
||||
log.WithError(err).Error("Could not compute payload attributes")
|
||||
return
|
||||
}
|
||||
if fcuArgs.attributes.IsEmpty() {
|
||||
return
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
|
||||
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,11 +99,9 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
|
||||
log.WithError(err).Error("could not save head")
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
|
||||
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
|
||||
|
||||
// Only need to prune attestations from pool if the head has changed.
|
||||
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
|
||||
return nil
|
||||
@@ -114,7 +112,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
|
||||
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
|
||||
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
|
||||
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
currentSlot := s.CurrentSlot()
|
||||
if proposingSlot == currentSlot {
|
||||
@@ -132,17 +130,17 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
|
||||
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
|
||||
return true
|
||||
}
|
||||
secs, err := slots.SecondsSinceSlotStart(currentSlot,
|
||||
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
|
||||
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not compute seconds since slot start")
|
||||
log.WithError(err).Error("Could not compute seconds since slot start")
|
||||
}
|
||||
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
|
||||
if sss >= doublylinkedtree.ProcessAttestationsThreshold {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", newHeadRoot),
|
||||
"weight": headWeight,
|
||||
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
|
||||
doublylinkedtree.ProcessAttestationsThreshold)
|
||||
"root": fmt.Sprintf("%#x", newHeadRoot),
|
||||
"weight": headWeight,
|
||||
"sinceSlotStart": sss,
|
||||
"threshold": doublylinkedtree.ProcessAttestationsThreshold,
|
||||
}).Info("Attempted late block reorg aborted due to attestations after threshold")
|
||||
lateBlockFailedAttemptFirstThreshold.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -36,29 +35,29 @@ func TestService_isNewHead(t *testing.T) {
|
||||
func TestService_getHeadStateAndBlock(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
_, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
|
||||
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
|
||||
require.ErrorContains(t, "block does not exist", err)
|
||||
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(t.Context(), blk))
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), st, r))
|
||||
|
||||
gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
|
||||
gotState, err := service.cfg.BeaconDB.State(t.Context(), r)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, st.ToProto(), gotState.ToProto())
|
||||
|
||||
gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
|
||||
gotBlk, err := service.cfg.BeaconDB.Block(t.Context(), r)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, blk, gotBlk)
|
||||
}
|
||||
|
||||
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
opts := testServiceOptsWithDB(t)
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
@@ -161,6 +160,7 @@ func TestShouldOverrideFCU(t *testing.T) {
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
|
||||
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
|
||||
fcs.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
|
||||
headRoot := [32]byte{'b'}
|
||||
parentRoot := [32]byte{'a'}
|
||||
ojc := &ethpb.Checkpoint{}
|
||||
@@ -181,11 +181,12 @@ func TestShouldOverrideFCU(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, headRoot, head)
|
||||
|
||||
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
|
||||
wantLog := "aborted due to attestations after threshold"
|
||||
fcs.SetGenesisTime(time.Now().Add(-29 * time.Second))
|
||||
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
|
||||
require.LogsDoNotContain(t, hook, "10 seconds")
|
||||
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
|
||||
require.LogsDoNotContain(t, hook, wantLog)
|
||||
fcs.SetGenesisTime(time.Now().Add(-24 * time.Second))
|
||||
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
|
||||
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
|
||||
require.LogsContain(t, hook, "10 seconds")
|
||||
require.LogsContain(t, hook, wantLog)
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
oldHeadRoot := bytesutil.ToBytes32(r)
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if node is optimistically synced")
|
||||
log.WithError(err).Error("Could not check if node is optimistically synced")
|
||||
}
|
||||
if headBlock.Block().ParentRoot() != oldHeadRoot {
|
||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||
@@ -111,11 +111,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
|
||||
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
|
||||
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
|
||||
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||
|
||||
@@ -18,9 +18,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Initialize the state cache for sync committees.
|
||||
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
|
||||
|
||||
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
|
||||
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
|
||||
type HeadSyncCommitteeFetcher interface {
|
||||
@@ -143,7 +140,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
|
||||
defer mLock.Unlock()
|
||||
|
||||
// If a head state already exists for the requested slot, we don't need to process slots.
|
||||
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
|
||||
cachedState, err := s.syncCommitteeHeadState.Get(slot)
|
||||
switch {
|
||||
case err == nil:
|
||||
syncHeadStateHit.Inc()
|
||||
@@ -166,7 +163,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
|
||||
return nil, err
|
||||
}
|
||||
syncHeadStateMiss.Inc()
|
||||
err = syncCommitteeHeadStateCache.Put(slot, headState)
|
||||
err = s.syncCommitteeHeadState.Put(slot, headState)
|
||||
return headState, err
|
||||
default:
|
||||
// In the event, we encounter another error
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
dbTest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -16,35 +14,35 @@ import (
|
||||
|
||||
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Current period
|
||||
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||
a, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
|
||||
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||
b, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, a, b)
|
||||
|
||||
// Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
|
||||
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||
b, err = c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, a, b)
|
||||
}
|
||||
|
||||
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||
indices, err := c.headCurrentSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
|
||||
@@ -53,12 +51,12 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
|
||||
|
||||
func TestService_headNextSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||
indices, err := c.headNextSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
|
||||
@@ -67,12 +65,12 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
|
||||
|
||||
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
|
||||
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
|
||||
pubkeys, err := c.HeadSyncCommitteePubKeys(t.Context(), primitives.Slot(slot), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Any subcommittee should match the subcommittee size.
|
||||
@@ -82,13 +80,13 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
||||
|
||||
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
|
||||
d, err := c.HeadSyncCommitteeDomain(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
@@ -96,13 +94,13 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
|
||||
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
|
||||
d, err := c.HeadSyncContributionProofDomain(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
@@ -110,30 +108,27 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
|
||||
|
||||
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := testServiceWithDB(t)
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
|
||||
d, err := c.HeadSyncSelectionProofDomain(t.Context(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
|
||||
c := syncCommitteeHeadStateCache
|
||||
t.Cleanup(func() {
|
||||
syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
|
||||
})
|
||||
s := testServiceNoDB(t)
|
||||
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
|
||||
require.NoError(t, beaconState.SetSlot(100))
|
||||
cachedState, err := c.Get(101)
|
||||
cachedState, err := s.syncCommitteeHeadState.Get(101)
|
||||
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
|
||||
require.Equal(t, nil, cachedState)
|
||||
require.NoError(t, c.Put(101, beaconState))
|
||||
cachedState, err = c.Get(101)
|
||||
require.NoError(t, s.syncCommitteeHeadState.Put(101, beaconState))
|
||||
cachedState, err = s.syncCommitteeHeadState.Get(101)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, beaconState, cachedState)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -34,17 +33,17 @@ func TestSaveHead_Same(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, service.saveHead(context.Background(), r, b, st))
|
||||
require.NoError(t, service.saveHead(t.Context(), r, b, st))
|
||||
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
|
||||
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
|
||||
}
|
||||
|
||||
func TestSaveHead_Different(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
@@ -61,7 +60,7 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
newHeadSignedBlock.Block.Slot = 1
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||
@@ -74,13 +73,13 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))
|
||||
|
||||
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
cachedRoot, err := service.HeadRoot(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
|
||||
headBlock, err := service.headBlock()
|
||||
@@ -92,12 +91,12 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
@@ -120,7 +119,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
|
||||
@@ -129,13 +128,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))
|
||||
|
||||
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
cachedRoot, err := service.HeadRoot(t.Context())
|
||||
require.NoError(t, err)
|
||||
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
||||
t.Error("Head did not change")
|
||||
@@ -154,20 +153,16 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
t.Run("genesis_state_root", func(t *testing.T) {
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
srv := &Service{
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
ForkChoiceStore: doublylinkedtree.New(),
|
||||
},
|
||||
originBlockRoot: [32]byte{1},
|
||||
}
|
||||
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{1}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
require.NoError(t, srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
|
||||
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
|
||||
@@ -185,18 +180,14 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
})
|
||||
t.Run("non_genesis_values", func(t *testing.T) {
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
genesisRoot := [32]byte{1}
|
||||
srv := &Service{
|
||||
cfg: &config{
|
||||
StateNotifier: notifier,
|
||||
ForkChoiceStore: doublylinkedtree.New(),
|
||||
},
|
||||
originBlockRoot: genesisRoot,
|
||||
}
|
||||
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
srv.originBlockRoot = genesisRoot
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
|
||||
epoch1Start, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
epoch2Start, err := slots.EpochStart(1)
|
||||
@@ -205,7 +196,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
@@ -225,11 +216,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.head = &head{
|
||||
@@ -243,7 +234,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||
@@ -256,9 +247,9 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))
|
||||
|
||||
rOnlyState, err := service.HeadStateReadOnly(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -267,7 +258,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
@@ -333,7 +324,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAttsElectra(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
@@ -404,10 +395,10 @@ func TestSaveOrphanedOps(t *testing.T) {
|
||||
config.ShardCommitteePeriod = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
@@ -481,7 +472,7 @@ func TestSaveOrphanedOps(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.cfg.BLSToExecPool = blstoexec.NewPool()
|
||||
@@ -539,7 +530,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
@@ -604,7 +595,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
@@ -11,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestService_getBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := setupBeaconChain(t, beaconDB)
|
||||
b1 := util.NewBeaconBlock()
|
||||
@@ -42,7 +41,7 @@ func TestService_getBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := setupBeaconChain(t, beaconDB)
|
||||
b1 := util.NewBeaconBlock()
|
||||
|
||||
@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell
const (
BytesPerCell = ckzg4844.BytesPerCell
BytesPerProof = ckzg4844.BytesPerProof
)

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
|
||||
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
|
||||
type Commitment [48]byte
|
||||
|
||||
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
|
||||
type Proof [48]byte
|
||||
type Proof [BytesPerProof]byte
|
||||
|
||||
// Bytes48 is a 48-byte array.
|
||||
type Bytes48 = ckzg4844.Bytes48
|
||||
@@ -102,7 +105,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
|
||||
for i := range cells {
|
||||
ckzgCells[i] = ckzg4844.Cell(cells[i])
|
||||
}
|
||||
|
||||
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
)

var (
// https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
// https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
//go:embed trusted_setup_4096.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context

@@ -1,32 +1,14 @@
|
||||
package kzg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
|
||||
func Verify(sidecars ...blocks.ROBlob) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sidecars) == 1 {
|
||||
return kzgContext.VerifyBlobKZGProof(
|
||||
bytesToBlob(sidecars[0].Blob),
|
||||
bytesToCommitment(sidecars[0].KzgCommitment),
|
||||
bytesToKZGProof(sidecars[0].KzgProof))
|
||||
}
|
||||
blobs := make([]GoKZG.Blob, len(sidecars))
|
||||
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
|
||||
proofs := make([]GoKZG.KZGProof, len(sidecars))
|
||||
for i, sidecar := range sidecars {
|
||||
blobs[i] = *bytesToBlob(sidecar.Blob)
|
||||
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
|
||||
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
|
||||
}
|
||||
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
|
||||
}
|
||||
|
||||
func bytesToBlob(blob []byte) *GoKZG.Blob {
|
||||
var ret GoKZG.Blob
|
||||
copy(ret[:], blob)
|
||||
@@ -42,3 +24,144 @@ func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
copy(ret[:], proof)
return
}

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
return nil
}
if len(blobSidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(blobSidecars[0].Blob),
bytesToCommitment(blobSidecars[0].KzgCommitment),
bytesToKZGProof(blobSidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(blobSidecars))
cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
proofs := make([]GoKZG.KZGProof, len(blobSidecars))
for i, sidecar := range blobSidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
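
// Editor's note: a hedged, caller-side sketch of the variadic Verify above; the helper name
// and error wrapping are assumptions for illustration, not part of this change.
func verifySidecars(sidecars []blocks.ROBlob) error {
	// Batch-verify all sidecars at once; Verify falls back to single verification for one sidecar.
	if err := Verify(sidecars...); err != nil {
		return errors.Wrap(err, "blob KZG proof verification failed")
	}
	return nil
}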
// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
|
||||
// This is more efficient than verifying each blob individually when len(blobs) > 1.
|
||||
// For single blob verification, it uses the optimized single verification path.
|
||||
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
|
||||
if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
|
||||
return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
|
||||
}
|
||||
|
||||
if len(blobs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Optimize for single blob case - use single verification to avoid batch overhead
|
||||
if len(blobs) == 1 {
|
||||
return kzgContext.VerifyBlobKZGProof(
|
||||
bytesToBlob(blobs[0]),
|
||||
bytesToCommitment(commitments[0]),
|
||||
bytesToKZGProof(proofs[0]))
|
||||
}
|
||||
|
||||
// Use batch verification for multiple blobs
|
||||
ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
|
||||
ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
|
||||
ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
|
||||
|
||||
for i := range blobs {
|
||||
if len(blobs[i]) != len(ckzg4844.Blob{}) {
|
||||
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
|
||||
}
|
||||
if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
|
||||
}
|
||||
if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
|
||||
}
|
||||
ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
|
||||
ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
|
||||
ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
|
||||
}
|
||||
|
||||
valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "batch verification")
|
||||
}
|
||||
if !valid {
|
||||
return errors.New("batch KZG proof verification failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
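As a usage sketch, VerifyBlobKZGProofBatch can be fed directly from GoKZG-typed values by slicing them to raw bytes, mirroring what the tests later in this change do. The helper below is illustrative only and not part of the diff; its name and placement in this package are assumptions.

// verifyBundle is an illustrative helper (not part of this change) showing how GoKZG-typed
// blobs, commitments, and proofs map onto the [][]byte arguments of VerifyBlobKZGProofBatch.
func verifyBundle(blobs []GoKZG.Blob, commitments []GoKZG.KZGCommitment, proofs []GoKZG.KZGProof) error {
	rawBlobs := make([][]byte, len(blobs))
	rawCommitments := make([][]byte, len(commitments))
	rawProofs := make([][]byte, len(proofs))
	for i := range blobs {
		rawBlobs[i] = blobs[i][:]
		rawCommitments[i] = commitments[i][:]
		rawProofs[i] = proofs[i][:]
	}
	// Length mismatches, wrong element sizes, and failed proofs are all reported as errors.
	return VerifyBlobKZGProofBatch(rawBlobs, rawCommitments, rawProofs)
}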
|
||||
|
||||
// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
|
||||
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
|
||||
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
|
||||
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
|
||||
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
|
||||
blobCount := uint64(len(blobs))
|
||||
expectedCellProofs := blobCount * numberOfColumns
|
||||
|
||||
if uint64(len(cellProofs)) != expectedCellProofs {
|
||||
return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
|
||||
}
|
||||
|
||||
if len(commitments) != len(blobs) {
|
||||
return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
|
||||
}
|
||||
|
||||
if blobCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle multiple blobs - compute cells for all blobs
|
||||
allCells := make([]Cell, 0, expectedCellProofs)
|
||||
allCommitments := make([]Bytes48, 0, expectedCellProofs)
|
||||
allIndices := make([]uint64, 0, expectedCellProofs)
|
||||
allProofs := make([]Bytes48, 0, expectedCellProofs)
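// VerifyCellKZGProofBatch consumes parallel slices: for every cell we record its commitment
// (repeated once per column of its blob), its column index, the cell data, and its proof.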
|
||||
|
||||
for blobIndex := range blobs {
|
||||
if len(blobs[blobIndex]) != len(Blob{}) {
|
||||
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
|
||||
}
|
||||
// Convert blob to kzg.Blob type
|
||||
blob := Blob(blobs[blobIndex])
|
||||
|
||||
// Compute cells for this blob
|
||||
cells, err := ComputeCells(&blob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
|
||||
}
|
||||
|
||||
// Add cells and corresponding data for each column
|
||||
for columnIndex := range numberOfColumns {
|
||||
cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
|
||||
if len(commitments[blobIndex]) != len(Bytes48{}) {
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
|
||||
}
|
||||
if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
|
||||
}
|
||||
allCells = append(allCells, cells[columnIndex])
|
||||
allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
|
||||
allIndices = append(allIndices, columnIndex)
|
||||
|
||||
allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
|
||||
}
|
||||
}
|
||||
|
||||
// Batch verify all cells
|
||||
valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cell batch verification")
|
||||
}
|
||||
if !valid {
|
||||
return errors.New("cell KZG proof batch verification failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
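As a usage sketch for the cell-proof path: the flattened proof layout delivered by the execution client is blob-major, so the proof for blob b and column c sits at index b*numberOfColumns+c. The helper below is illustrative and not part of the diff; fixing the column count at 128 is an assumption that matches the tests rather than a value read from configuration.

// verifyBlobsWithFlattenedCellProofs is an illustrative helper (not part of this change)
// demonstrating the blob-major cell proof layout expected by VerifyCellKZGProofBatchFromBlobData.
func verifyBlobsWithFlattenedCellProofs(blobs, commitments, cellProofs [][]byte) error {
	// Assumed column count; the tests below use the same value.
	const numberOfColumns = uint64(128)
	// cellProofs must hold len(blobs)*numberOfColumns entries:
	// the proof for (blob b, column c) lives at index b*numberOfColumns + c.
	return VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
}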
|
||||
|
||||
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
|
||||
}
|
||||
|
||||
func TestVerify(t *testing.T) {
|
||||
sidecars := make([]blocks.ROBlob, 0)
|
||||
require.NoError(t, Verify(sidecars...))
|
||||
blobSidecars := make([]blocks.ROBlob, 0)
|
||||
require.NoError(t, Verify(blobSidecars...))
|
||||
}
|
||||
|
||||
func TestBytesToAny(t *testing.T) {
|
||||
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGenerateCommitmentAndProof(t *testing.T) {
|
||||
require.NoError(t, Start())
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
|
||||
require.Equal(t, expectedCommitment, commitment)
|
||||
require.Equal(t, expectedProof, proof)
|
||||
}
|
||||
|
||||
func TestVerifyBlobKZGProofBatch(t *testing.T) {
|
||||
// Initialize KZG for testing
|
||||
require.NoError(t, Start())
|
||||
|
||||
t.Run("valid single blob batch", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
proofs := [][]byte{proof[:]}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("valid multiple blob batch", func(t *testing.T) {
|
||||
blobCount := 3
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
proofs := make([][]byte, blobCount)
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := random.GetRandBlob(int64(i))
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob[:]
|
||||
commitments[i] = commitment[:]
|
||||
proofs[i] = proof[:]
|
||||
}
|
||||
|
||||
err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("empty inputs should pass", func(t *testing.T) {
|
||||
err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("mismatched input lengths", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test different mismatch scenarios
|
||||
err = VerifyBlobKZGProofBatch(
|
||||
[][]byte{blob[:]},
|
||||
[][]byte{},
|
||||
[][]byte{proof[:]},
|
||||
)
|
||||
require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
|
||||
|
||||
err = VerifyBlobKZGProofBatch(
|
||||
[][]byte{blob[:], blob[:]},
|
||||
[][]byte{commitment[:]},
|
||||
[][]byte{proof[:], proof[:]},
|
||||
)
|
||||
require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
|
||||
})
|
||||
|
||||
t.Run("invalid commitment should fail", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
_, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a different blob's commitment (mismatch)
|
||||
differentBlob := random.GetRandBlob(456)
|
||||
wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{wrongCommitment[:]}
|
||||
proofs := [][]byte{proof[:]}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
// Single blob optimization uses different error message
|
||||
require.ErrorContains(t, "can't verify opening proof", err)
|
||||
})
|
||||
|
||||
t.Run("invalid proof should fail", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, _, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use wrong proof
|
||||
invalidProof := make([]byte, 48) // All zeros
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
proofs := [][]byte{invalidProof}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.ErrorContains(t, "short buffer", err)
|
||||
})
|
||||
|
||||
t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
|
||||
// First blob - valid
|
||||
blob1 := random.GetRandBlob(123)
|
||||
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second blob - invalid proof
|
||||
blob2 := random.GetRandBlob(456)
|
||||
commitment2, _, err := GenerateCommitmentAndProof(blob2)
|
||||
require.NoError(t, err)
|
||||
invalidProof := make([]byte, 48) // All zeros
|
||||
|
||||
blobs := [][]byte{blob1[:], blob2[:]}
|
||||
commitments := [][]byte{commitment1[:], commitment2[:]}
|
||||
proofs := [][]byte{proof1[:], invalidProof}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.ErrorContains(t, "batch verification", err)
|
||||
})
|
||||
|
||||
t.Run("batch KZG proof verification failed", func(t *testing.T) {
|
||||
// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
|
||||
blob1 := random.GetRandBlob(123)
|
||||
blob2 := random.GetRandBlob(456)
|
||||
|
||||
// Generate valid proof for blob1
|
||||
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate valid proof for blob2 but use wrong commitment (from blob1)
|
||||
_, proof2, err := GenerateCommitmentAndProof(blob2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
|
||||
blobs := [][]byte{blob1[:], blob2[:]}
|
||||
commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
|
||||
proofs := [][]byte{proof1[:], proof2[:]}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.ErrorContains(t, "batch KZG proof verification failed", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
|
||||
// Initialize KZG for testing
|
||||
require.NoError(t, Start())
|
||||
|
||||
t.Run("valid single blob cell verification", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
// Generate blob and commitment
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute cells and proofs
|
||||
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create flattened cell proofs (like execution client format)
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = cellsAndProofs.Proofs[i][:]
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("valid multiple blob cell verification", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 2
|
||||
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
for i := range blobCount {
|
||||
// Generate blob and commitment
|
||||
randBlob := random.GetRandBlob(int64(i))
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute cells and proofs
|
||||
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob[:]
|
||||
commitments[i] = commitment[:]
|
||||
|
||||
// Add cell proofs for this blob
|
||||
for j := range numberOfColumns {
|
||||
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
|
||||
}
|
||||
}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("empty inputs should pass", func(t *testing.T) {
|
||||
err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("mismatched blob and commitment count", func(t *testing.T) {
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(
|
||||
[][]byte{blob[:]},
|
||||
[][]byte{}, // Empty commitments
|
||||
[][]byte{},
|
||||
128,
|
||||
)
|
||||
require.ErrorContains(t, "expected 128 cell proofs", err)
|
||||
})
|
||||
|
||||
t.Run("wrong cell proof count", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
|
||||
wrongCellProofs := make([][]byte, 10)
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
|
||||
})
|
||||
|
||||
t.Run("invalid cell proofs should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
// Create invalid cell proofs (all zeros)
|
||||
invalidCellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
invalidCellProofs[i] = make([]byte, 48) // All zeros
|
||||
}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "cell batch verification", err)
|
||||
})
|
||||
|
||||
t.Run("mismatched commitment should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
// Generate blob and correct cell proofs
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate wrong commitment from different blob
|
||||
randBlob2 := random.GetRandBlob(456)
|
||||
var differentBlob Blob
|
||||
copy(differentBlob[:], randBlob2[:])
|
||||
wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = cellsAndProofs.Proofs[i][:]
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{wrongCommitment[:]}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "cell KZG proof batch verification failed", err)
|
||||
})
|
||||
|
||||
t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
// Create invalid blob (not properly formatted)
|
||||
invalidBlobData := make([]byte, 10) // Too short
|
||||
commitment := make([]byte, 48) // Dummy commitment
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = make([]byte, 48)
|
||||
}
|
||||
|
||||
blobs := [][]byte{invalidBlobData}
|
||||
commitments := [][]byte{commitment}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.NotNil(t, err)
|
||||
require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
|
||||
})
|
||||
|
||||
t.Run("invalid commitment size should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
|
||||
// Create invalid commitment (wrong size)
|
||||
invalidCommitment := make([]byte, 32) // Should be 48 bytes
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = make([]byte, 48)
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{invalidCommitment}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
|
||||
})
|
||||
|
||||
t.Run("invalid cell proof size should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create invalid cell proofs (wrong size)
|
||||
invalidCellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
if i == 0 {
|
||||
invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
|
||||
} else {
|
||||
invalidCellProofs[i] = make([]byte, 48)
|
||||
}
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
|
||||
})
|
||||
|
||||
t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 2
|
||||
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
// First blob - valid
|
||||
randBlob1 := random.GetRandBlob(123)
|
||||
var blob1 Blob
|
||||
copy(blob1[:], randBlob1[:])
|
||||
commitment1, err := BlobToKZGCommitment(&blob1)
|
||||
require.NoError(t, err)
|
||||
blobs[0] = blob1[:]
|
||||
commitments[0] = commitment1[:]
|
||||
|
||||
// Second blob - use invalid commitment size
|
||||
randBlob2 := random.GetRandBlob(456)
|
||||
var blob2 Blob
|
||||
copy(blob2[:], randBlob2[:])
|
||||
blobs[1] = blob2[:]
|
||||
commitments[1] = make([]byte, 32) // Wrong size
|
||||
|
||||
// Add cell proofs for both blobs
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for j := uint64(0); j < numberOfColumns; j++ {
|
||||
allCellProofs = append(allCellProofs, make([]byte, 48))
|
||||
}
|
||||
}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
|
||||
})
|
||||
|
||||
t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 2
|
||||
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
randBlob := random.GetRandBlob(int64(i))
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob[:]
|
||||
commitments[i] = commitment[:]
|
||||
|
||||
// Add cell proofs - make some invalid in the second blob
|
||||
for j := uint64(0); j < numberOfColumns; j++ {
|
||||
if i == 1 && j == 64 {
|
||||
// Invalid proof size in middle of second blob's proofs
|
||||
allCellProofs = append(allCellProofs, make([]byte, 20))
|
||||
} else {
|
||||
allCellProofs = append(allCellProofs, make([]byte, 48))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -26,9 +26,6 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().Deposits()) > 0 {
|
||||
log = log.WithField("deposits", len(b.Body().Deposits()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
@@ -89,8 +86,8 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.ToTime(genesisTime, block.Slot())
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -111,7 +108,6 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
"deposits": len(block.Body().Deposits()),
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
@@ -159,7 +155,9 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
fields["blsToExecutionChanges"] = len(changes)
|
||||
if len(changes) > 0 {
|
||||
fields["blsToExecutionChanges"] = len(changes)
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
|
||||
@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has attester slashing",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -93,7 +93,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has payload",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -9,23 +8,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 0}))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
err = reportEpochMetrics(t.Context(), s, h)
|
||||
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
|
||||
}
|
||||
|
||||
@@ -36,6 +25,6 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
|
||||
v.Slashed = true
|
||||
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
|
||||
require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(ð.AttestationData{})}))
|
||||
err = reportEpochMetrics(context.Background(), h, h)
|
||||
err = reportEpochMetrics(t.Context(), h, h)
|
||||
require.ErrorContains(t, "slot 0 out of bounds", err)
|
||||
}
|
||||
|
||||
@@ -8,9 +8,10 @@ import (
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func testServiceOptsWithDB(t *testing.T) []Option {
|
||||
func testServiceOptsWithDB(t testing.TB) []Option {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New()
|
||||
cs := startup.NewClockSynchronizer()
|
||||
@@ -31,3 +32,15 @@ func testServiceOptsNoDB() []Option {
|
||||
cs := startup.NewClockSynchronizer()
|
||||
return []Option{WithClockSynchronizer(cs)}
|
||||
}
|
||||
|
||||
func testServiceNoDB(t testing.TB) *Service {
|
||||
s, err := NewService(t.Context(), testServiceOptsNoDB()...)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
|
||||
|
||||
func testServiceWithDB(t testing.TB) *Service {
|
||||
s, err := NewService(t.Context(), testServiceOptsWithDB(t)...)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
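Since the helpers now accept testing.TB, the same setup can be reused from benchmarks. The sketch below is illustrative and not part of this change; the benchmark name and body are placeholders.

// BenchmarkServiceSetup is illustrative only: with testing.TB the existing helpers work
// from a benchmark exactly as they do from a test.
func BenchmarkServiceSetup(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = testServiceWithDB(b)
	}
}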
|
||||
|
||||
@@ -6,12 +6,11 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
|
||||
@@ -33,6 +32,14 @@ func WithMaxGoroutines(x int) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithLCStore for light client store access.
|
||||
func WithLCStore() Option {
|
||||
return func(s *Service) error {
|
||||
s.lcStore = lightclient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
|
||||
return nil
|
||||
}
|
||||
}
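WithLCStore dereferences s.cfg.P2P, s.cfg.StateNotifier, and s.cfg.BeaconDB when it runs, and options are applied in order, so in a hedged sketch it is appended after whatever options populate those fields. The helper below is illustrative and not part of the change.

// withLightClientEnabled is illustrative only: it appends WithLCStore after a base option set
// that is assumed to already configure the P2P service, state notifier, and beacon DB.
func withLightClientEnabled(base []Option) []Option {
	return append(base, WithLCStore())
}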
|
||||
|
||||
// WithWeakSubjectivityCheckpoint for checkpoint sync.
|
||||
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
|
||||
return func(s *Service) error {
|
||||
@@ -227,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithCustodyInfo sets the custody info for the blockchain service.
|
||||
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.CustodyInfo = custodyInfo
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSlasherEnabled sets whether the slasher is enabled or not.
|
||||
func WithSlasherEnabled(enabled bool) Option {
|
||||
return func(s *Service) error {
|
||||
@@ -246,7 +245,7 @@ func WithSlasherEnabled(enabled bool) Option {
|
||||
// WithGenesisTime sets the genesis time for the blockchain service.
|
||||
func WithGenesisTime(genesisTime time.Time) Option {
|
||||
return func(s *Service) error {
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
|
||||
return nil
|
||||
}
|
||||
}
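A small sketch of the new truncation behavior (illustrative, not part of this change): any sub-second component on the supplied genesis time is dropped before it is stored, so two genesis values that differ only below one second configure the service identically. The function name and sample timestamp are assumptions for demonstration.

// exampleGenesisOption is illustrative only: it shows that WithGenesisTime normalizes the
// genesis time to whole seconds, matching the one-second precision noted above.
func exampleGenesisOption() Option {
	genesis := time.Unix(1606824023, 500_000_000) // 500ms of sub-second precision (sample value)
	return WithGenesisTime(genesis)               // stored as time.Unix(1606824023, 0)
}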
|
||||
@@ -258,3 +257,12 @@ func WithLightClientStore(lcs *lightclient.Store) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithStartWaitingDataColumnSidecars sets a channel that the `areDataColumnsAvailable` function will send to
// when it starts waiting for additional data columns.
|
||||
func WithStartWaitingDataColumnSidecars(c chan bool) Option {
|
||||
return func(s *Service) error {
|
||||
s.startWaitingDataColumnSidecars = c
|
||||
return nil
|
||||
}
|
||||
}
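An illustrative wiring sketch (not part of this change): tests can observe when the data-availability check begins waiting for columns by passing a channel through this option and reading from it. The helper name and the unbuffered channel choice are assumptions.

// newServiceWaitingOnColumns is illustrative only: it wires a notification channel into the
// service so callers can detect when areDataColumnsAvailable starts waiting for sidecars.
func newServiceWaitingOnColumns(ctx context.Context, waiting chan bool, opts ...Option) (*Service, error) {
	opts = append(opts, WithStartWaitingDataColumnSidecars(waiting))
	return NewService(ctx, opts...)
}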
|
||||
|
||||
@@ -60,10 +60,8 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
return err
|
||||
}
|
||||
|
||||
genesisTime := uint64(s.genesisTime.Unix())
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
|
||||
if err := verifyAttTargetEpoch(ctx, s.genesisTime, time.Now().Add(disparity), tgt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -76,7 +74,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
if err := slots.VerifyTime(s.genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -4,12 +4,12 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -139,8 +139,8 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
|
||||
}
|
||||
|
||||
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
|
||||
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
|
||||
func verifyAttTargetEpoch(_ context.Context, genesis, now time.Time, c *ethpb.Checkpoint) error {
|
||||
currentSlot := slots.At(genesis, now)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
var prevEpoch primitives.Epoch
|
||||
// Prevents previous epoch underflow
|
||||
|
||||
@@ -161,7 +161,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
|
||||
func TestService_GetRecentPreState(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
@@ -183,7 +183,7 @@ func TestService_GetRecentPreState(t *testing.T) {
|
||||
|
||||
func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
@@ -353,29 +353,29 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
|
||||
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
|
||||
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err := verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
|
||||
nowTime := time.Unix(2*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
|
||||
err := verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
|
||||
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -385,7 +385,7 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
@@ -402,7 +402,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
|
||||
@@ -3,7 +3,6 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
@@ -159,7 +158,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
}
|
||||
|
||||
// Fill in missing blocks
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
|
||||
}
|
||||
|
||||
@@ -240,13 +239,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
}
|
||||
}
|
||||
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
|
||||
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
|
||||
}
|
||||
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
pendingNodes[len(blks)-i-1] = args
|
||||
pendingNodes[i] = args
|
||||
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -283,14 +283,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
return err
|
||||
}
|
||||
// Insert all nodes but the last one to forkchoice
|
||||
// Insert all nodes to forkchoice
|
||||
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
|
||||
return errors.Wrap(err, "could not insert batch to forkchoice")
|
||||
}
|
||||
// Insert the last block to forkchoice
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
|
||||
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
|
||||
}
|
||||
// Set their optimistic status
|
||||
if isValidPayload {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
|
||||
@@ -308,6 +304,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
|
||||
blockVersion := roBlock.Version()
|
||||
block := roBlock.Block()
|
||||
slot := block.Slot()
|
||||
|
||||
if blockVersion >= version.Fulu {
|
||||
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
|
||||
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
|
||||
e := coreTime.CurrentEpoch(st)
|
||||
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
|
||||
@@ -329,7 +349,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
// The latest block header is from the previous epoch
|
||||
r, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not update proposer index state-root map")
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
return nil
|
||||
}
|
||||
// The proposer indices cache takes the target root for the previous
|
||||
@@ -339,12 +359,12 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
}
|
||||
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not update proposer index state-root map")
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
return nil
|
||||
}
|
||||
err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not update proposer index state-root map")
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -562,7 +582,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
|
||||
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
|
||||
func (s *Service) runLateBlockTasks() {
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("failed to wait for initial sync")
|
||||
log.WithError(err).Error("Failed to wait for initial sync")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -584,7 +604,7 @@ func (s *Service) runLateBlockTasks() {
|
||||
// It returns a map where each key represents a missing BlobSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// BlobSidecars against the set of known missing sidecars.
|
||||
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
|
||||
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
@@ -592,7 +612,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
|
||||
if len(expected) > maxBlobsPerBlock {
|
||||
return nil, errMaxBlobsExceeded
|
||||
}
|
||||
indices := bs.Summary(root)
|
||||
indices := store.Summary(root)
|
||||
missing := make(map[uint64]bool, len(expected))
|
||||
for i := range expected {
|
||||
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
|
||||
@@ -607,7 +627,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
|
||||
// It returns a map where each key represents a missing DataColumnSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// DataColumns against the set of known missing sidecars.
|
||||
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -619,7 +639,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
|
||||
}
|
||||
|
||||
// Get a summary of the data columns stored in the database.
|
||||
summary := bs.Summary(root)
|
||||
summary := store.Summary(root)
|
||||
|
||||
// Check all expected data columns against the summary.
|
||||
missing := make(map[uint64]bool)
|
||||
@@ -637,12 +657,16 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
block := signedBlock.Block()
|
||||
func (s *Service) isDataAvailable(
|
||||
ctx context.Context,
|
||||
roBlock consensusblocks.ROBlock,
|
||||
) error {
|
||||
block := roBlock.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
|
||||
root := roBlock.Root()
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
return s.areDataColumnsAvailable(ctx, root, block)
|
||||
@@ -657,11 +681,14 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBloc
|
||||
|
||||
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
func (s *Service) areDataColumnsAvailable(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
block interfaces.ReadOnlyBeaconBlock,
|
||||
) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
@@ -682,16 +709,21 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
|
||||
}
|
||||
|
||||
// All columns to sample need to be available for the block to be considered available.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
// Prevent custody group count to change during the rest of the function.
|
||||
s.cfg.CustodyInfo.Mut.RLock()
|
||||
defer s.cfg.CustodyInfo.Mut.RUnlock()
|
||||
|
||||
// Get the custody group sampling size for the node.
|
||||
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
// Compute the sampling size.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
samplingSize := max(samplesPerSlot, custodyGroupCount)
|
||||
|
||||
// Get the peer info for the node.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
@@ -704,7 +736,7 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
|
||||
summary := s.dataColumnStorage.Summary(root)
|
||||
storedDataColumnsCount := summary.Count()
|
||||
|
||||
minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
@@ -713,50 +745,41 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
|
||||
}
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
|
||||
missing, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missingMap) == 0 {
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.startWaitingDataColumnSidecars != nil {
|
||||
s.startWaitingDataColumnSidecars <- true
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine slot start time: %w", err)
|
||||
}
|
||||
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
timer := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
missingMapCount := uint64(len(missingMap))
|
||||
missingCount := uint64(len(missing))
|
||||
|
||||
if missingMapCount == 0 {
|
||||
if missingCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
expected interface{} = "all"
|
||||
missing interface{} = "all"
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
colMapCount := uint64(len(peerInfo.CustodyColumns))
|
||||
|
||||
if colMapCount < numberOfColumns {
|
||||
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
|
||||
}
|
||||
|
||||
if missingMapCount < numberOfColumns {
|
||||
missing = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": missing,
|
||||
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
|
||||
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
|
||||
}).Warning("Data columns still missing at slot end")
|
||||
})
|
||||
defer timer.Stop()
|
||||
@@ -772,7 +795,7 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
|
||||
|
||||
for _, index := range idents.Indices {
|
||||
// This is a data column we are expecting.
|
||||
if _, ok := missingMap[index]; ok {
|
||||
if _, ok := missing[index]; ok {
|
||||
storedDataColumnsCount++
|
||||
}
|
||||
|
||||
@@ -783,10 +806,10 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missingMap, index)
|
||||
delete(missing, index)
|
||||
|
||||
// Return if there are no more missing data columns.
|
||||
if len(missingMap) == 0 {
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -794,13 +817,13 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
|
||||
case <-ctx.Done():
|
||||
var missingIndices interface{} = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missingMap))
|
||||
missingIndicesCount := uint64(len(missing))
|
||||
|
||||
if missingIndicesCount < numberOfColumns {
|
||||
missingIndices = uint64MapToSortedSlice(missingMap)
|
||||
missingIndices = helpers.SortedPrettySliceFromMap(missing)
|
||||
}
|
||||
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -843,7 +866,10 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
|
||||
nc := s.blobNotifiers.forRoot(root, block.Slot())
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine slot start time: %w", err)
|
||||
}
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
@@ -878,23 +904,13 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
|
||||
}
|
||||
}
|
||||
|
||||
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
|
||||
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
|
||||
output := make([]uint64, 0, len(input))
|
||||
for idx := range input {
|
||||
output = append(output, idx)
|
||||
}
|
||||
slices.Sort[[]uint64](output)
|
||||
return output
|
||||
}
|
||||
|
||||
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||
// related to late blocks. It emits a MissedSlot state feed event.
|
||||
// It calls FCU and sets the right attributes if we are proposing next slot
|
||||
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
|
||||
func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
currentSlot := s.CurrentSlot()
|
||||
if s.CurrentSlot() == s.HeadSlot() {
|
||||
if currentSlot == s.HeadSlot() {
|
||||
return
|
||||
}
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
@@ -915,10 +931,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
// return early if we already started building a block for the current
|
||||
// head root
|
||||
@@ -932,7 +948,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if attribute.IsEmpty() {
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
|
||||
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
|
||||
}
|
||||
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
|
||||
// call notifyForkchoiceUpdate, so the event is fired here.
|
||||
|
||||
@@ -1,17 +1,14 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
@@ -31,9 +28,13 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
|
||||
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
|
||||
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
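A minimal guard sketch (illustrative, not part of this change): since finality can never run ahead of justification, a callee receiving both checkpoints can surface swapped arguments with this sentinel. The function name is an assumption.

// validateCheckpointOrder is illustrative only: it returns ErrInvalidCheckpointArgs when the
// finalized checkpoint's epoch exceeds the justified checkpoint's epoch, which usually means
// the caller passed the checkpoints in the wrong order.
func validateCheckpointOrder(finalized, justified *ethpb.Checkpoint) error {
	if finalized.Epoch > justified.Epoch {
		return ErrInvalidCheckpointArgs
	}
	return nil
}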
|
||||
|
||||
// CurrentSlot returns the current slot based on time.
|
||||
func (s *Service) CurrentSlot() primitives.Slot {
|
||||
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
return slots.CurrentSlot(s.genesisTime)
|
||||
}
|
||||
|
||||
// getFCUArgs returns the arguments to call forkchoice update
|
||||
@@ -45,7 +46,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
|
||||
return nil
|
||||
}
|
||||
slot := cfg.roblock.Block().Slot()
|
||||
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
|
||||
if slots.WithinVotingWindow(s.genesisTime, slot) {
|
||||
return nil
|
||||
}
|
||||
return s.computePayloadAttributes(cfg, fcuArgs)
|
||||
@@ -68,11 +69,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
@@ -130,38 +131,26 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
|
||||
})
|
||||
}
|
||||
|
||||
// processLightClientUpdates saves the light client data in lcStore, when feature flag is enabled.
|
||||
func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
|
||||
if err := s.processLightClientUpdate(cfg); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client update")
|
||||
}
|
||||
if err := s.processLightClientBootstrap(cfg); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client bootstrap")
|
||||
}
|
||||
if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client optimistic update")
|
||||
}
|
||||
if err := s.processLightClientFinalityUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
|
||||
log.WithError(err).Error("Failed to process light client finality update")
|
||||
}
|
||||
}
|
||||
|
||||
// processLightClientUpdate saves the light client update for this block
|
||||
// if it's better than the already saved one, when feature flag is enabled.
|
||||
func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
|
||||
attestedRoot := cfg.roblock.Block().ParentRoot()
|
||||
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
|
||||
log.WithError(err).Error("processLightClientUpdates: Could not get attested block")
|
||||
return
|
||||
}
|
||||
if attestedBlock == nil || attestedBlock.IsNil() {
|
||||
return errors.New("attested block is nil")
|
||||
log.Error("processLightClientUpdates: Could not get attested block")
|
||||
return
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
|
||||
log.WithError(err).Error("processLightClientUpdates: Could not get attested state")
|
||||
return
|
||||
}
|
||||
if attestedState == nil || attestedState.IsNil() {
|
||||
return errors.New("attested state is nil")
|
||||
log.Error("processLightClientUpdates: Could not get attested state")
|
||||
return
|
||||
}
|
||||
|
||||
finalizedRoot := attestedState.FinalizedCheckpoint().Root
|
||||
@@ -169,201 +158,17 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
|
||||
if err != nil {
|
||||
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
|
||||
log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
|
||||
log.WithError(err).Error("processLightClientUpdates: Could not get finalized block")
|
||||
return
|
||||
}
|
||||
|
||||
update, err := lightclient.NewLightClientUpdateFromBeaconState(
|
||||
cfg.ctx,
|
||||
s.CurrentSlot(),
|
||||
cfg.postState,
|
||||
cfg.roblock,
|
||||
attestedState,
|
||||
attestedBlock,
|
||||
finalizedBlock,
|
||||
)
|
||||
err = s.lcStore.SaveLCData(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock, s.headRoot())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not create light client update")
|
||||
log.WithError(err).Error("processLightClientUpdates: Could not save light client data")
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
|
||||
|
||||
oldUpdate, err := s.cfg.BeaconDB.LightClientUpdate(cfg.ctx, period)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get current light client update")
|
||||
}
|
||||
|
||||
if oldUpdate == nil {
|
||||
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client update")
|
||||
}
|
||||
log.WithField("period", period).Debug("Saved new light client update")
|
||||
return nil
|
||||
}
|
||||
|
||||
isNewUpdateBetter, err := lightclient.IsBetterUpdate(update, oldUpdate)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not compare light client updates")
|
||||
}
|
||||
|
||||
if isNewUpdateBetter {
|
||||
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client update")
|
||||
}
|
||||
log.WithField("period", period).Debug("Saved new light client update")
|
||||
return nil
|
||||
}
|
||||
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
|
||||
return nil
|
||||
}
|
||||
|
||||
// processLightClientBootstrap saves a light client bootstrap for this block
|
||||
// when feature flag is enabled.
|
||||
func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error {
|
||||
blockRoot := cfg.roblock.Root()
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not create light client bootstrap")
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveLightClientBootstrap(cfg.ctx, blockRoot[:], bootstrap); err != nil {
|
||||
return errors.Wrapf(err, "could not save light client bootstrap")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) processLightClientFinalityUpdate(
|
||||
ctx context.Context,
|
||||
signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
postState state.BeaconState,
|
||||
) error {
|
||||
attestedRoot := signed.Block().ParentRoot()
|
||||
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
|
||||
}
|
||||
|
||||
finalizedCheckpoint := attestedState.FinalizedCheckpoint()
|
||||
|
||||
// Check if the finalized checkpoint has changed
|
||||
if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
|
||||
return nil
|
||||
}
|
||||
|
||||
finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
|
||||
log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
|
||||
}
|
||||
|
||||
newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
|
||||
ctx,
|
||||
postState.Slot(),
|
||||
postState,
|
||||
signed,
|
||||
attestedState,
|
||||
attestedBlock,
|
||||
finalizedBlock,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create light client finality update")
|
||||
}
|
||||
|
||||
lastUpdate := s.lcStore.LastFinalityUpdate()
if lastUpdate != nil {
// The finalized_header.beacon.lastUpdateSlot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded lastUpdateSlot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that lastUpdateSlot did not indicate supermajority
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := lightclient.UpdateHasSupermajority(newUpdate.SyncAggregate())

lastUpdateSlot := lastUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := lightclient.UpdateHasSupermajority(lastUpdate.SyncAggregate())

if newUpdateSlot < lastUpdateSlot {
log.Debug("Skip saving light client finality newUpdate: Older than local newUpdate")
return nil
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
log.Debug("Skip saving light client finality update: No supermajority advantage")
return nil
}
}
log.Debug("Saving new light client finality update")
s.lcStore.SetLastFinalityUpdate(newUpdate)

s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: newUpdate,
})

if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client finality update")
}

return nil
}

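The forwarding rule described in the comments above follows the consensus-specs light client gossip conditions: a finality update supersedes the previously forwarded one when its finalized slot is newer, or equal while newly reaching supermajority (> 2/3) sync committee participation. The sketch below only illustrates the supermajority test on a participation bitfield; it is not Prysm's lightclient.UpdateHasSupermajority, and the plain []byte representation with an explicit committee size is an assumption.

// Illustration of the "> 2/3 sync committee participation" test referenced above.
// Simplified bitfield handling; not the Prysm implementation.
package main

import (
	"fmt"
	"math/bits"
)

// hasSupermajority reports whether more than two thirds of the sync committee
// participated, given the aggregate participation bits and the committee size.
func hasSupermajority(syncCommitteeBits []byte, committeeSize int) bool {
	participants := 0
	for _, b := range syncCommitteeBits {
		participants += bits.OnesCount8(b)
	}
	// strictly greater than 2/3, using integer arithmetic to avoid rounding
	return participants*3 > committeeSize*2
}

func main() {
	bits512 := make([]byte, 64) // 512-member committee, as on mainnet
	for i := 0; i < 43; i++ {   // set 344 participation bits (43 full bytes)
		bits512[i] = 0xFF
	}
	fmt.Println(hasSupermajority(bits512, 512)) // true: 344*3 = 1032 > 1024
}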
func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
postState state.BeaconState) error {
|
||||
attestedRoot := signed.Block().ParentRoot()
|
||||
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
|
||||
}
|
||||
|
||||
newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx,
|
||||
postState.Slot(),
|
||||
postState,
|
||||
signed,
|
||||
attestedState,
|
||||
attestedBlock,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
|
||||
log.WithError(err).Debug("Skipping processing light client optimistic update")
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "could not create light client optimistic update")
|
||||
}
|
||||
|
||||
lastUpdate := s.lcStore.LastOptimisticUpdate()
|
||||
if lastUpdate != nil {
|
||||
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
|
||||
if newUpdate.AttestedHeader().Beacon().Slot <= lastUpdate.AttestedHeader().Beacon().Slot {
|
||||
log.Debug("Skip saving light client optimistic update: Older than local update")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("Saving new light client optimistic update")
|
||||
s.lcStore.SetLastOptimisticUpdate(newUpdate)
|
||||
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.LightClientOptimisticUpdate,
|
||||
Data: newUpdate,
|
||||
})
|
||||
|
||||
if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
|
||||
return errors.Wrap(err, "could not broadcast light client optimistic update")
|
||||
}
|
||||
|
||||
return nil
|
||||
log.Debug("Processed light client updates")
|
||||
}
|
||||
|
||||
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
|
||||
@@ -432,7 +237,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
|
||||
}
|
||||
|
||||
// Verify block slot time is not from the future.
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
if err := slots.VerifyTime(s.genesisTime, b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -527,7 +332,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
// is meant to be asynchronous and run in the background rather than being
|
||||
// tied to the execution of a block.
|
||||
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
|
||||
log.WithError(err).Error("could not migrate to cold")
|
||||
log.WithError(err).Error("Could not migrate to cold")
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
@@ -561,6 +366,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
|
||||
// This is useful for block tree visualizer and additional vote accounting.
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
if fCheckpoint.Epoch > jCheckpoint.Epoch {
|
||||
return ErrInvalidCheckpointArgs
|
||||
}
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
|
||||
|
||||
// Fork choice only matters from last finalized slot.
|
||||
@@ -569,15 +377,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// The first block can have a bogus root since the block is not inserted in forkchoice
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
|
||||
root := signed.Block().ParentRoot()
|
||||
// As long as parent node is not in fork choice store, and parent node is in DB.
|
||||
root := roblock.Block().ParentRoot()
|
||||
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
|
||||
b, err := s.getBlock(ctx, root)
|
||||
if err != nil {
|
||||
@@ -596,12 +397,13 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
FinalizedCheckpoint: fCheckpoint}
|
||||
pendingNodes = append(pendingNodes, args)
|
||||
}
|
||||
if len(pendingNodes) == 1 {
|
||||
if len(pendingNodes) == 0 {
|
||||
return nil
|
||||
}
|
||||
if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
|
||||
return ErrNotDescendantOfFinalized
|
||||
}
|
||||
slices.Reverse(pendingNodes)
|
||||
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
|
||||
}
|
||||
|
||||
@@ -616,7 +418,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
|
||||
// Update deposit cache.
|
||||
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not fetch finalized state")
|
||||
log.WithError(err).Error("Could not fetch finalized state")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -634,7 +436,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
|
||||
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
|
||||
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not cast eth1 deposit index")
|
||||
log.WithError(err).Error("Could not cast eth1 deposit index")
|
||||
return
|
||||
}
|
||||
// The deposit index in the state is always the index of the next deposit
|
||||
@@ -643,12 +445,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
|
||||
finalizedEth1DepIdx := eth1DepositIndex - 1
|
||||
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
|
||||
0 /* Setting a zero value as we have no access to block height */); err != nil {
|
||||
log.WithError(err).Error("could not insert finalized deposits")
|
||||
log.WithError(err).Error("Could not insert finalized deposits")
|
||||
return
|
||||
}
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
|
||||
log.WithError(err).Error("could not prune deposit proofs")
|
||||
log.WithError(err).Error("Could not prune deposit proofs")
|
||||
}
|
||||
// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
|
||||
// to the provided eth1 deposit index.
|
||||
|
||||
File diff suppressed because it is too large
@@ -43,7 +43,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err != nil {
return nil, err
}
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
if err := slots.ValidateClock(ss, s.genesisTime); err != nil {
return nil, err
}
// We acquire the lock here instead than on gettAttPreState because that function gets called from UpdateHead that holds a write lock
@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
go func() {
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
}
|
||||
if s.genesisTime.IsZero() {
|
||||
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("could not process new slot")
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
|
||||
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block")
|
||||
log.WithError(err).Error("Could not get head block")
|
||||
return
|
||||
}
|
||||
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
return
|
||||
}
|
||||
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("could not update forkchoice")
|
||||
log.WithError(err).Error("Could not update forkchoice")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -177,9 +177,9 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.GetData().Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
continue
}

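The comment above records the fork-choice rule that an attestation only counts from the slot after the one it was created in. The sketch below illustrates that timing gate with a clock-disparity allowance; the helper name and the 12-second slot duration are assumptions, and the real check in Prysm is slots.VerifyTime.

// Sketch of the "consider attestations only from the next slot" gate.
// Names and the slot duration are assumptions for illustration only.
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // assumption

// canConsider reports whether an attestation created in attSlot may already be
// taken into account by fork choice, allowing a small clock disparity.
func canConsider(genesis time.Time, attSlot uint64, now time.Time, disparity time.Duration) bool {
	nextSlot := attSlot + 1
	nextSlotStart := genesis.Add(time.Duration(nextSlot*secondsPerSlot) * time.Second)
	// The attestation counts once its next slot has (almost) started.
	return !now.Before(nextSlotStart.Add(-disparity))
}

func main() {
	genesis := time.Now().Add(-30 * time.Second) // roughly in slot 2
	fmt.Println(canConsider(genesis, 1, time.Now(), 500*time.Millisecond)) // true: slot 2 has started
	fmt.Println(canConsider(genesis, 2, time.Now(), 500*time.Millisecond)) // false: slot 3 has not
}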
@@ -1,7 +1,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -32,7 +31,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
|
||||
_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
|
||||
_, err := service.AttestationTargetState(t.Context(), &ethpb.Checkpoint{Epoch: e})
|
||||
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
|
||||
}
|
||||
|
||||
@@ -56,11 +55,11 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
a.Data.Target.Root = []byte{'c'}
|
||||
r33Root := r33.Root()
|
||||
a.Data.BeaconBlockRoot = r33Root[:]
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(t.Context(), a))
|
||||
|
||||
r32Root := r32.Root()
|
||||
a.Data.Target.Root = r32Root[:]
|
||||
err = service.VerifyLmdFfgConsistency(context.Background(), a)
|
||||
err = service.VerifyLmdFfgConsistency(t.Context(), a)
|
||||
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
|
||||
@@ -71,7 +70,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, genesisState.SetGenesisTime(time.Now().Add(-1*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -84,12 +83,16 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}

receivedTime := time.Now()
s.blockBeingSynced.set(blockRoot)
err := s.blockBeingSynced.set(blockRoot)
if errors.Is(err, errBlockBeingSynced) {
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring block currently being synced")
return nil
}
defer s.blockBeingSynced.unset(blockRoot)

blockCopy, err := block.Copy()
if err != nil {
return err
return errors.Wrap(err, "block copy")
}

preState, err := s.getBlockPreState(ctx, blockCopy.Block())
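ReceiveBlock now checks the error returned by blockBeingSynced.set and skips blocks that are already being processed (errBlockBeingSynced). The currentlySyncingBlock type itself is not part of this excerpt; the following is only a sketch of such a duplicate-detecting set, and every name in it other than errBlockBeingSynced is hypothetical.

// Sketch of a duplicate-detecting "currently syncing" set matching the new
// error-returning set/unset flow above. Not the Prysm implementation.
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errBlockBeingSynced = errors.New("block is already being synced")

type syncingSet struct {
	mu    sync.Mutex
	roots map[[32]byte]struct{}
}

// set marks a root as being synced, or reports that it already is.
func (s *syncingSet) set(root [32]byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.roots[root]; ok {
		return errBlockBeingSynced
	}
	s.roots[root] = struct{}{}
	return nil
}

// unset removes the root once processing finishes (typically deferred).
func (s *syncingSet) unset(root [32]byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.roots, root)
}

func main() {
	s := &syncingSet{roots: make(map[[32]byte]struct{})}
	root := [32]byte{1}
	fmt.Println(s.set(root))                                  // <nil>
	fmt.Println(errors.Is(s.set(root), errBlockBeingSynced))  // true
	s.unset(root)
}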
@@ -100,17 +103,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
currentCheckpoints := s.saveCurrentCheckpoints(preState)
|
||||
roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "new ro block with root")
|
||||
}
|
||||
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "validator execution and consensus")
|
||||
}
|
||||
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
daWaitedTime, err := s.handleDA(ctx, avs, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "handle da")
|
||||
}
|
||||
|
||||
// Defragment the state before continuing block processing.
|
||||
@@ -131,10 +134,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err := s.postBlockProcess(args); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
return errors.Wrap(err, "post block process")
|
||||
}
|
||||
if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "update checkpoints")
|
||||
}
|
||||
// If slasher is configured, forward the attestations in the block via an event feed for processing.
|
||||
if s.slasherEnabled {
|
||||
@@ -148,12 +151,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
|
||||
// Have we been finalizing? Should we start saving hot states to db?
|
||||
if err := s.checkSaveHotStateDB(ctx); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "check save hot state db")
|
||||
}
|
||||
|
||||
// We apply the same heuristic to some of our more important caches.
|
||||
if err := s.handleCaches(); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "handle caches")
|
||||
}
|
||||
s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
|
||||
return nil
|
||||
@@ -183,7 +186,7 @@ func (s *Service) updateCheckpoints(
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
|
||||
log.WithError(err).Error("could not report epoch metrics")
|
||||
log.WithError(err).Error("Could not report epoch metrics")
|
||||
}
|
||||
}
|
||||
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
|
||||
@@ -236,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
return postState, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) handleDA(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
avs das.AvailabilityStore,
|
||||
) (elapsed time.Duration, err error) {
|
||||
defer func(start time.Time) {
|
||||
elapsed = time.Since(start)
|
||||
|
||||
if err == nil {
|
||||
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
|
||||
}
|
||||
}(time.Now())
|
||||
|
||||
if avs == nil {
|
||||
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
|
||||
var err error
|
||||
start := time.Now()
|
||||
if avs != nil {
|
||||
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
|
||||
} else {
|
||||
err = s.isDataAvailable(ctx, block)
|
||||
}
|
||||
|
||||
var rob blocks.ROBlock
|
||||
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return
|
||||
elapsed := time.Since(start)
|
||||
if err == nil {
|
||||
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
|
||||
}
|
||||
|
||||
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
|
||||
|
||||
return
|
||||
return elapsed, err
|
||||
}
|
||||
|
||||
func (s *Service) reportPostBlockProcessing(
|
||||
@@ -283,7 +268,7 @@ func (s *Service) reportPostBlockProcessing(
|
||||
// Log block sync status.
|
||||
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
|
||||
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
|
||||
log.WithError(err).Error("Unable to log block sync status")
|
||||
}
|
||||
// Log payload data
|
||||
@@ -300,15 +285,45 @@ func (s *Service) reportPostBlockProcessing(
|
||||
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
|
||||
// Send finalization event
|
||||
go func() {
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
|
||||
// Insert finalized deposits into finalized deposit trie
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
// Save a light client bootstrap for the finalized checkpoint
|
||||
go func() {
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, finalized.Root)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve state for finalized root to save light client bootstrap")
|
||||
return
|
||||
}
|
||||
err = s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root, st)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not save light client bootstrap by block root")
|
||||
} else {
|
||||
log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
|
||||
}
|
||||
}()
|
||||
|
||||
// Clean up the light client store caches
|
||||
go func() {
|
||||
err := s.lcStore.MigrateToCold(s.ctx, finalized.Root)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not migrate light client store to cold storage")
|
||||
} else {
|
||||
log.Debugf("Migrated light client store to cold storage for finalized root %#x", finalized.Root)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -9,7 +8,10 @@ import (
|
||||
blockchainTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
@@ -19,6 +21,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -28,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
func TestService_ReceiveBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
copiedGen := genesis.Copy()
|
||||
@@ -189,7 +192,9 @@ func TestHandleDA(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
s, _ := minimalTestService(t)
|
||||
elapsed, err := s.handleDA(context.Background(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
|
||||
block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
elapsed, err := s.handleDA(t.Context(), nil, block)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
|
||||
}
|
||||
@@ -228,7 +233,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
|
||||
@@ -293,23 +298,26 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
func TestService_HasBlock(t *testing.T) {
|
||||
s, _ := minimalTestService(t)
|
||||
r := [32]byte{'a'}
|
||||
if s.HasBlock(context.Background(), r) {
|
||||
if s.HasBlock(t.Context(), r) {
|
||||
t.Error("Should not have block")
|
||||
}
|
||||
wsb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb))
|
||||
if !s.HasBlock(context.Background(), r) {
|
||||
require.NoError(t, s.saveInitSyncBlock(t.Context(), r, wsb))
|
||||
if !s.HasBlock(t.Context(), r) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
util.SaveBlock(t, context.Background(), s.cfg.BeaconDB, b)
|
||||
util.SaveBlock(t, t.Context(), s.cfg.BeaconDB, b)
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, s.HasBlock(context.Background(), r))
|
||||
s.blockBeingSynced.set(r)
|
||||
require.Equal(t, false, s.HasBlock(context.Background(), r))
|
||||
require.Equal(t, true, s.HasBlock(t.Context(), r))
|
||||
err = s.blockBeingSynced.set(r)
|
||||
require.NoError(t, err)
|
||||
err = s.blockBeingSynced.set(r)
|
||||
require.ErrorIs(t, err, errBlockBeingSynced)
|
||||
require.Equal(t, false, s.HasBlock(t.Context(), r))
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
@@ -318,7 +326,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
|
||||
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
@@ -329,19 +337,19 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
|
||||
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s, _ := minimalTestService(t)
|
||||
s.genesisTime = time.Now()
|
||||
s.SetGenesisTime(time.Now())
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
|
||||
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
@@ -349,8 +357,9 @@ func TestHandleCaches_EnablingLargeSize(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s, _ := minimalTestService(t)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
|
||||
|
||||
helpers.ClearCache()
|
||||
require.NoError(t, s.handleCaches())
|
||||
assert.LogsContain(t, hook, "Expanding committee cache size")
|
||||
}
|
||||
@@ -456,7 +465,7 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
|
||||
headState, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
finalizedStRoot, err := headState.HashTreeRoot(context.Background())
|
||||
finalizedStRoot, err := headState.HashTreeRoot(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
@@ -563,3 +572,44 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestProcessLightClientBootstrap(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
|
||||
s, tr := minimalTestService(t, WithLCStore())
|
||||
ctx := tr.ctx
|
||||
|
||||
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock))
|
||||
finalizedBlockRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, l.FinalizedState, finalizedBlockRoot))
|
||||
|
||||
cp := l.AttestedState.FinalizedCheckpoint()
|
||||
require.DeepSSZEqual(t, finalizedBlockRoot, [32]byte(cp.Root))
|
||||
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))
|
||||
|
||||
s.executePostFinalizationTasks(s.ctx, l.AttestedState)
|
||||
|
||||
// wait for the goroutine to finish processing
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Check that the light client bootstrap is saved
|
||||
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, btst, b)
|
||||
require.Equal(t, b.Version(), testVersion)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,10 +15,9 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
|
||||
}
|
||||
|
||||
// ReceiveDataColumn receives a single data column.
|
||||
// (It is only a wrapper around ReceiveDataColumns.)
|
||||
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
|
||||
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
return errors.Wrap(err, "save data column sidecar")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -12,17 +12,15 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
f "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
|
||||
@@ -31,6 +29,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -47,27 +46,29 @@ import (
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
genesisTime time.Time
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
dataColumnStorage *filesystem.DataColumnStorage
|
||||
slasherEnabled bool
|
||||
lcStore *lightClient.Store
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
genesisTime time.Time
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
dataColumnStorage *filesystem.DataColumnStorage
|
||||
slasherEnabled bool
|
||||
lcStore *lightClient.Store
|
||||
startWaitingDataColumnSidecars chan bool // for testing purposes only
|
||||
syncCommitteeHeadState *cache.SyncCommitteeHeadStateCache
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -95,7 +96,6 @@ type config struct {
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
SyncChecker Checker
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// Checker is an interface used to determine if a node is in initial sync
|
||||
@@ -179,14 +179,15 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{},
|
||||
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{},
|
||||
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(srv); err != nil {
|
||||
@@ -205,17 +206,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
|
||||
// Start a blockchain service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
saved := s.cfg.FinalizedStateAtStartUp
|
||||
defer s.removeStartupState()
|
||||
|
||||
if saved != nil && !saved.IsNil() {
|
||||
if err := s.StartFromSavedState(saved); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
if err := s.startFromExecutionChain(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
go s.runLateBlockTasks()
|
||||
@@ -264,8 +257,11 @@ func (s *Service) Status() error {
|
||||
|
||||
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
|
||||
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if state.IsNil(saved) {
|
||||
return errors.New("Last finalized state at startup is nil")
|
||||
}
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
|
||||
s.genesisTime = saved.GenesisTime()
|
||||
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
|
||||
|
||||
originRoot, err := s.originRootFromSavedState(s.ctx)
|
||||
@@ -293,6 +289,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get and save custody group count")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
|
||||
return errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -355,62 +365,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) startFromExecutionChain() error {
|
||||
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
|
||||
if s.cfg.ChainStartFetcher == nil {
|
||||
return errors.New("not configured execution chain")
|
||||
}
|
||||
go func() {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case e := <-stateChannel:
|
||||
if e.Type == statefeed.ChainStarted {
|
||||
data, ok := e.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
log.Error("event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
}
|
||||
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
|
||||
s.onExecutionChainStart(s.ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state forRoot failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
|
||||
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
|
||||
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Could not initialize beacon chain")
|
||||
}
|
||||
// We start a counter to genesis, if needed.
|
||||
gRoot, err := initializedState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Could not hash tree root genesis state")
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
|
||||
|
||||
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
|
||||
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
|
||||
}
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
// based on a genesis timestamp value obtained from the ChainStart event emitted
|
||||
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
|
||||
@@ -421,7 +375,7 @@ func (s *Service) initializeBeaconChain(
|
||||
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
|
||||
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
|
||||
@@ -482,7 +436,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
|
||||
|
||||
if err := s.setHead(&head{
|
||||
genesisBlkRoot,
|
||||
@@ -513,6 +467,57 @@ func (s *Service) removeStartupState() {
|
||||
s.cfg.FinalizedStateAtStartUp = nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfoInDB updates the custody information in the database.
|
||||
// It returns the (potentially updated) custody group count and the earliest available slot.
|
||||
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
custodyRequirement := beaconConfig.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update subscription status to all data subnets")
|
||||
}
|
||||
|
||||
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
|
||||
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
|
||||
log.Warnf(
|
||||
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
|
||||
flags.SubscribeAllDataSubnets.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute the custody group count.
|
||||
custodyGroupCount := custodyRequirement
|
||||
if isSubscribedToAllDataSubnets {
|
||||
custodyGroupCount = beaconConfig.NumberOfColumns
|
||||
}
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
fuluForkSlot, err := fuluForkSlot()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
|
||||
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
|
||||
if slot < fuluForkSlot {
|
||||
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "earliest slot")
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
currentTime := prysmTime.Now()
|
||||
if currentTime.After(genesisTime) {
|
||||
@@ -529,3 +534,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
}
|
||||
|
||||
func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()

fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}

forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}

return forkFuluSlot, nil
}

@@ -1,11 +1,9 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -18,15 +16,12 @@ func init() {
|
||||
}
|
||||
|
||||
func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
s := testServiceWithDB(t)
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
|
||||
}()
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -42,7 +43,7 @@ import (
)

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
ctx := t.Context()
var web3Service *execution.Service
var err error
srv, endpoint, err := mockExecution.SetupRPCServer()
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)

depositCache, err := depositsnapshot.New()
require.NoError(t, err)

web3Service, err = execution.NewService(
ctx,
execution.WithDatabase(beaconDB),
execution.WithHttpEndpoint(endpoint),
execution.WithDepositContractAddress(common.Address{}),
execution.WithDepositCache(depositCache),
)
require.NoError(t, err, "Unable to set up web3 service")

attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)

depositCache, err := depositsnapshot.New()
require.NoError(t, err)

fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
// Safe a state in stategen to purposes of testing a service stop / shutdown.
@@ -115,7 +119,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {

func TestChainStartStop_Initialized(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
@@ -127,7 +131,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -152,7 +156,7 @@ func TestChainStartStop_Initialized(t *testing.T) {

func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
@@ -164,7 +168,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -184,7 +188,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
func TestChainService_InitializeBeaconChain(t *testing.T) {
helpers.ClearCache()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
ctx := t.Context()

bc := setupBeaconChain(t, beaconDB)
var err error
@@ -226,7 +230,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
}

func TestChainService_CorrectGenesisRoots(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
@@ -238,7 +242,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -295,7 +299,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
r, err := c.HeadRoot(t.Context())
require.NoError(t, err)
if !bytes.Equal(headRoot[:], r) {
t.Error("head slot incorrect")
@@ -345,12 +349,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
}

func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fc := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
}
ctx := t.Context()
s := testServiceWithDB(t)
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
@@ -370,11 +370,8 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
}

func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
ctx := t.Context()
s := testServiceWithDB(t)
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -391,48 +388,21 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
}

func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
}
s := testServiceWithDB(t)
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
bb := util.NewBeaconBlock()
r, err := bb.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(bb)
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
require.NoError(t, s.saveInitSyncBlock(s.ctx, r, wsb))
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}

func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}

func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
ctx := b.Context()
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
@@ -447,11 +417,8 @@ func BenchmarkHasBlockDB(b *testing.B) {
}

func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
ctx := b.Context()
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"slices"

forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -20,7 +21,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
return errors.Wrap(err, "could not set up forkchoice checkpoints")
}
if err := s.setupForkchoiceTree(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice root")
return errors.Wrap(err, "could not set up forkchoice tree")
}
if err := s.initializeHead(s.ctx, st); err != nil {
return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +31,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {

func (s *Service) startupHeadRoot() [32]byte {
headStr := features.Get().ForceHead
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
jp := s.CurrentJustifiedCheckpt()
jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
if headStr == "" {
return fRoot
return jRoot
}
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
return fRoot
log.WithError(err).Error("Could not get head block root, starting with justified block as head")
return jRoot
}
log.Infof("Using Head root of %#x", root)
return root
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
return fRoot
log.WithError(err).Error("Could not parse head root, starting with justified block as head")
return jRoot
}
return [32]byte(root)
}
@@ -64,16 +65,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
}
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
if err != nil {
log.WithError(err).Error("could not get head block, starting with finalized block as head")
log.WithError(err).Error("Could not get head block, starting with finalized block as head")
return nil
}
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
return nil
}
chain, err := s.buildForkchoiceChain(s.ctx, blk)
if err != nil {
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
return nil
}
s.cfg.ForkChoiceStore.Lock()
@@ -112,6 +113,7 @@ func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.Read
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
}
}
slices.Reverse(chain)
return chain, nil
}

@@ -170,6 +172,6 @@ func (s *Service) setupForkchoiceCheckpoints() error {
Root: fRoot}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
return nil
}
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
})

st, _ := util.DeterministicGenesisState(t, 64)
@@ -124,5 +124,5 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
require.NotEqual(t, fRoot, root)
require.Equal(t, root, service.startupHeadRoot())
require.NoError(t, service.setupForkchoiceTree(st))
require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
require.Equal(t, 3, service.cfg.ForkChoiceStore.NodeCount())
}

@@ -4,29 +4,33 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -51,6 +55,7 @@ type mockBroadcaster struct {
|
||||
|
||||
type mockAccessor struct {
|
||||
mockBroadcaster
|
||||
mockCustodyManager
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
@@ -84,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
|
||||
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
@@ -94,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig
|
||||
|
||||
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
|
||||
|
||||
// mockCustodyManager is a mock implementation of p2p.CustodyManager
|
||||
type mockCustodyManager struct {
|
||||
mut sync.RWMutex
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
dch.mut.RLock()
|
||||
defer dch.mut.RUnlock()
|
||||
|
||||
return dch.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
|
||||
dch.mut.RLock()
|
||||
defer dch.mut.RUnlock()
|
||||
|
||||
return dch.custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
dch.mut.Lock()
|
||||
defer dch.mut.Unlock()
|
||||
|
||||
dch.earliestAvailableSlot = earliestAvailableSlot
|
||||
dch.custodyGroupCount = custodyGroupCount
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var _ p2p.CustodyManager = (*mockCustodyManager)(nil)
|
||||
|
||||
type testServiceRequirements struct {
|
||||
ctx context.Context
|
||||
db db.Database
|
||||
@@ -108,9 +150,11 @@ type testServiceRequirements struct {
|
||||
}
|
||||
|
||||
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
genesis := time.Now().Add(-1 * 4 * time.Duration(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().SecondsPerSlot)) * time.Second) // Genesis was 4 epochs ago.
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New()
|
||||
fcs.SetGenesisTime(genesis)
|
||||
sg := stategen.New(beaconDB, fcs)
|
||||
notif := &mockBeaconNode{}
|
||||
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
|
||||
@@ -149,6 +193,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
WithP2PBroadcaster(&mockAccessor{}),
|
||||
WithLightClientStore(&lightclient.Store{}),
|
||||
WithGenesisTime(genesis),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -555,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
ojc := ðpb.Checkpoint{}
|
||||
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("could not update head")
|
||||
logrus.WithError(err).Error("Could not update head")
|
||||
}
|
||||
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("could not insert node to forkchoice")
|
||||
logrus.WithError(err).Error("Could not insert node to forkchoice")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -641,7 +641,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
|
||||
}
|
||||
|
||||
// SetForkChoiceGenesisTime mocks the same method in the chain service
|
||||
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
|
||||
func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
s.ForkChoiceStore.SetGenesisTime(timestamp)
|
||||
}
|
||||
@@ -723,7 +723,8 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
|
||||
}
|
||||
|
||||
// ReceiveDataColumns implements the same method in chain service
|
||||
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
|
||||
func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) error {
|
||||
c.DataColumns = append(c.DataColumns, dcs...)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -18,11 +15,8 @@ import (
|
||||
)
|
||||
|
||||
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1792480
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -69,17 +63,17 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := testServiceWithDB(t)
|
||||
beaconDB := s.cfg.BeaconDB
|
||||
util.SaveBlock(t, t.Context(), beaconDB, b)
|
||||
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
|
||||
require.Equal(t, !tt.disabled, wv.enabled)
|
||||
require.NoError(t, err)
|
||||
fcs := doublylinkedtree.New()
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
|
||||
wsVerifier: wv,
|
||||
}
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
|
||||
s.cfg.WeakSubjectivityCheckpt = tt.checkpt
|
||||
s.wsVerifier = wv
|
||||
require.Equal(t, !tt.disabled, wv.enabled)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
|
||||
err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch)
|
||||
if tt.wantErr == nil {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
|
||||
@@ -24,7 +24,8 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
|
||||
SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
@@ -68,7 +69,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
log.WithError(err).Error("Failed to check builder status")
|
||||
} else {
|
||||
log.WithField("endpoint", s.c.NodeURL()).Info("Builder has been configured")
|
||||
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
|
||||
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
|
||||
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
|
||||
}
|
||||
}
|
||||
@@ -87,7 +88,7 @@ func (s *Service) Stop() error {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock submits a blinded block to the builder relay network.
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
@@ -101,6 +102,22 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
|
||||
// After Fulu, relays only return status codes (no payload).
|
||||
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
if s.c == nil {
|
||||
return ErrNoBuilder
|
||||
}
|
||||
|
||||
return s.c.SubmitBlindedBlockPostFulu(ctx, b)
|
||||
}
|
||||
|
||||
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -15,19 +14,19 @@ import (
|
||||
)
|
||||
|
||||
func Test_NewServiceWithBuilder(t *testing.T) {
|
||||
s, err := NewService(context.Background(), WithBuilderClient(&buildertesting.MockClient{}))
|
||||
s, err := NewService(t.Context(), WithBuilderClient(&buildertesting.MockClient{}))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, s.Configured())
|
||||
}
|
||||
|
||||
func Test_NewServiceWithoutBuilder(t *testing.T) {
|
||||
s, err := NewService(context.Background())
|
||||
s, err := NewService(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, s.Configured())
|
||||
}
|
||||
|
||||
func Test_RegisterValidator(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
db := dbtesting.SetupDB(t)
|
||||
headFetcher := &blockchainTesting.ChainService{}
|
||||
builder := buildertesting.NewClient()
|
||||
@@ -40,7 +39,7 @@ func Test_RegisterValidator(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_RegisterValidator_WithCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
headFetcher := &blockchainTesting.ChainService{}
|
||||
builder := buildertesting.NewClient()
|
||||
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
|
||||
@@ -55,16 +54,16 @@ func Test_RegisterValidator_WithCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_BuilderMethodsWithouClient(t *testing.T) {
|
||||
s, err := NewService(context.Background())
|
||||
s, err := NewService(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, s.Configured())
|
||||
|
||||
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
|
||||
_, err = s.GetHeader(t.Context(), 0, [32]byte{}, [48]byte{})
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
|
||||
_, _, err = s.SubmitBlindedBlock(context.Background(), nil)
|
||||
_, _, err = s.SubmitBlindedBlock(t.Context(), nil)
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
|
||||
err = s.RegisterValidator(context.Background(), nil)
|
||||
err = s.RegisterValidator(t.Context(), nil)
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
}
|
||||
|
||||
@@ -24,20 +24,22 @@ type Config struct {
|
||||
|
||||
// MockBuilderService to mock builder.
|
||||
type MockBuilderService struct {
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
ErrSubmitBlindedBlock error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
Cfg *Config
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
BlobBundleV2 *v1.BlobsBundleV2
|
||||
ErrSubmitBlindedBlock error
|
||||
ErrSubmitBlindedBlockPostFulu error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
Cfg *Config
|
||||
}
|
||||
|
||||
// Configured for mocking.
|
||||
@@ -46,7 +48,7 @@ func (s *MockBuilderService) Configured() bool {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
|
||||
switch b.Version() {
|
||||
case version.Bellatrix:
|
||||
w, err := blocks.WrappedExecutionPayload(s.Payload)
|
||||
@@ -66,6 +68,16 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
|
||||
}
|
||||
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
|
||||
case version.Fulu:
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload for fulu")
|
||||
}
|
||||
// For Fulu, return BlobsBundleV2 if available, otherwise regular BlobsBundle
|
||||
if s.BlobBundleV2 != nil {
|
||||
return w, s.BlobBundleV2, s.ErrSubmitBlindedBlock
|
||||
}
|
||||
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
|
||||
default:
|
||||
return nil, nil, errors.New("unknown block version for mocking")
|
||||
}
|
||||
@@ -104,3 +116,8 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
|
||||
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
return s.ErrRegisterValidator
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
return s.ErrSubmitBlindedBlockPostFulu
|
||||
}
|
||||
|
||||
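Note: a minimal sketch of how the new post-Fulu mock surface above might be exercised in a test. Only the MockBuilderService field ErrSubmitBlindedBlockPostFulu and the SubmitBlindedBlockPostFulu signature come from the diff; the package name, import path, and test wiring are assumptions.

// Sketch only: import path and package layout are assumed, not taken from this diff.
package builder_test

import (
	"errors"
	"testing"

	buildertesting "github.com/OffchainLabs/prysm/v6/beacon-chain/builder/testing"
)

func TestSubmitBlindedBlockPostFulu_MockError(t *testing.T) {
	want := errors.New("relay rejected block")
	m := &buildertesting.MockBuilderService{ErrSubmitBlindedBlockPostFulu: want}
	// Post-Fulu relays only return a status code, so the mock surfaces just an error.
	if err := m.SubmitBlindedBlockPostFulu(t.Context(), nil); !errors.Is(err, want) {
		t.Fatalf("expected %v, got %v", want, err)
	}
}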
beacon-chain/cache/committee_fuzz_test.go (vendored, 9 lines changed)
@@ -3,7 +3,6 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -30,8 +29,8 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
|
||||
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
|
||||
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -45,9 +44,9 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
|
||||
|
||||
indices, err := cache.ActiveIndices(context.Background(), c.Seed)
|
||||
indices, err := cache.ActiveIndices(t.Context(), c.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.SortedIndices, indices)
|
||||
}
|
||||
|
||||
beacon-chain/cache/committee_test.go (vendored, 28 lines changed)
@@ -44,15 +44,15 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
|
||||
slot := params.BeaconConfig().SlotsPerEpoch
|
||||
committeeIndex := primitives.CommitteeIndex(1)
|
||||
indices, err := cache.Committee(context.Background(), slot, item.Seed, committeeIndex)
|
||||
indices, err := cache.Committee(t.Context(), slot, item.Seed, committeeIndex)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
|
||||
|
||||
wantedIndex := primitives.CommitteeIndex(0)
|
||||
indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
|
||||
indices, err = cache.Committee(t.Context(), slot, item.Seed, wantedIndex)
|
||||
require.NoError(t, err)
|
||||
|
||||
start, end := startEndIndices(item, uint64(wantedIndex))
|
||||
@@ -63,15 +63,15 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(context.Background(), item.Seed)
|
||||
indices, err := cache.ActiveIndices(t.Context(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
|
||||
|
||||
indices, err = cache.ActiveIndices(context.Background(), item.Seed)
|
||||
indices, err = cache.ActiveIndices(t.Context(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, item.SortedIndices, indices)
|
||||
}
|
||||
@@ -80,13 +80,13 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
count, err := cache.ActiveIndicesCount(t.Context(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
|
||||
|
||||
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
count, err = cache.ActiveIndicesCount(t.Context(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(item.SortedIndices), count)
|
||||
}
|
||||
@@ -100,7 +100,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
for i := start; i < end; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &Committees{Seed: bytesutil.ToBytes32(s)}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
|
||||
}
|
||||
|
||||
k := cache.CommitteeCache.Keys()
|
||||
@@ -130,7 +130,7 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
_ = cache.CommitteeCache.Add(key, comms)
|
||||
|
||||
_, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
|
||||
_, err = cache.Committee(t.Context(), 0, seed, math.MaxUint64) // Overflow!
|
||||
require.NotNil(t, err, "Did not fail as expected")
|
||||
}
|
||||
|
||||
@@ -138,15 +138,15 @@ func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
count, err := cache.ActiveIndicesCount(t.Context(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
|
||||
cancelled, cancel := context.WithCancel(context.Background())
|
||||
cancelled, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled)
|
||||
|
||||
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
count, err = cache.ActiveIndicesCount(t.Context(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package depositsnapshot
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
@@ -55,7 +54,7 @@ func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
}
|
||||
dc.deposits = deposits
|
||||
|
||||
d := dc.AllDeposits(context.Background(), nil)
|
||||
d := dc.AllDeposits(t.Context(), nil)
|
||||
assert.Equal(t, len(deposits), len(d))
|
||||
}
|
||||
|
||||
@@ -95,7 +94,7 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
|
||||
}
|
||||
dc.deposits = deposits
|
||||
|
||||
d := dc.AllDeposits(context.Background(), big.NewInt(11))
|
||||
d := dc.AllDeposits(t.Context(), big.NewInt(11))
|
||||
assert.Equal(t, 5, len(d))
|
||||
}
|
||||
|
||||
@@ -127,7 +126,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
DepositRoot: wantedRoot,
|
||||
},
|
||||
}
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(13))
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(13))
|
||||
assert.Equal(t, 4, int(n))
|
||||
require.DeepEqual(t, wantedRoot, root[:])
|
||||
})
|
||||
@@ -143,7 +142,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
DepositRoot: wantedRoot,
|
||||
},
|
||||
}
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
|
||||
assert.Equal(t, 1, int(n))
|
||||
require.DeepEqual(t, wantedRoot, root[:])
|
||||
})
|
||||
@@ -169,7 +168,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
|
||||
assert.Equal(t, 2, int(n))
|
||||
require.DeepEqual(t, wantedRoot, root[:])
|
||||
})
|
||||
@@ -185,7 +184,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
DepositRoot: wantedRoot,
|
||||
},
|
||||
}
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(7))
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(7))
|
||||
assert.Equal(t, 0, int(n))
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, root)
|
||||
})
|
||||
@@ -201,7 +200,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
DepositRoot: wantedRoot,
|
||||
},
|
||||
}
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
|
||||
assert.Equal(t, 1, int(n))
|
||||
require.DeepEqual(t, wantedRoot, root[:])
|
||||
})
|
||||
@@ -237,7 +236,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(9))
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(9))
|
||||
assert.Equal(t, 3, int(n))
|
||||
require.DeepEqual(t, wantedRoot, root[:])
|
||||
})
|
||||
@@ -288,10 +287,10 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
dc.InsertDepositContainers(context.Background(), ctrs)
|
||||
dc.InsertDepositContainers(t.Context(), ctrs)
|
||||
|
||||
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
|
||||
dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
|
||||
dep, blkNum := dc.DepositByPubkey(t.Context(), pk1)
|
||||
|
||||
if dep == nil || !bytes.Equal(dep.Data.PublicKey, pk1) {
|
||||
t.Error("Returned wrong deposit")
|
||||
@@ -303,7 +302,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
func TestInsertDepositContainers_NotNil(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
dc.InsertDepositContainers(context.Background(), nil)
|
||||
dc.InsertDepositContainers(t.Context(), nil)
|
||||
assert.DeepEqual(t, []*ethpb.DepositContainer{}, dc.deposits)
|
||||
}
|
||||
|
||||
@@ -359,10 +358,10 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
|
||||
err = dc.finalizedDeposits.depositTree.pushLeaf(root)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
|
||||
@@ -425,15 +424,15 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
err = dc.finalizedDeposits.Deposits().Insert(root[:], 0)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
|
||||
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
require.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex())
|
||||
@@ -459,10 +458,10 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex())
|
||||
@@ -509,10 +508,10 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
|
||||
@@ -592,14 +591,14 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reinsert finalized deposits with a lower index.
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex())
|
||||
@@ -670,10 +669,10 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
|
||||
Index: 3,
|
||||
DepositRoot: rootCreator('D'),
|
||||
})
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
|
||||
deps := dc.NonFinalizedDeposits(t.Context(), 1, nil)
|
||||
assert.Equal(t, 2, len(deps))
|
||||
}
|
||||
|
||||
@@ -681,7 +680,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits_Nil(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), 0, nil)
|
||||
deps := dc.NonFinalizedDeposits(t.Context(), 0, nil)
|
||||
assert.Equal(t, 0, len(deps))
|
||||
}
|
||||
|
||||
@@ -740,10 +739,10 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
|
||||
Index: 3,
|
||||
DepositRoot: rootCreator('D'),
|
||||
})
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
|
||||
deps := dc.NonFinalizedDeposits(t.Context(), 1, big.NewInt(10))
|
||||
assert.Equal(t, 1, len(deps))
|
||||
}
|
||||
|
||||
@@ -785,21 +784,21 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Perform this in a nonsensical ordering
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 3, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Mimic finalized deposit trie fetch.
|
||||
fd, err := dc.FinalizedDeposits(context.Background())
|
||||
fd, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)
|
||||
deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil)
|
||||
insertIndex := fd.MerkleTrieIndex() + 1
|
||||
|
||||
for _, dep := range deps {
|
||||
@@ -810,24 +809,24 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
}
|
||||
insertIndex++
|
||||
}
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 6, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 6, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 9, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 9, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 12, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 12, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = dc.InsertFinalizedDeposits(context.Background(), 14, [32]byte{}, 0)
|
||||
err = dc.InsertFinalizedDeposits(t.Context(), 14, [32]byte{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
fd, err = dc.FinalizedDeposits(context.Background())
|
||||
fd, err = dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)
|
||||
deps = dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil)
|
||||
insertIndex = fd.MerkleTrieIndex() + 1
|
||||
|
||||
for _, dep := range dc.deposits {
|
||||
@@ -888,9 +887,9 @@ func TestMin(t *testing.T) {
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
fd, err := dc.FinalizedDeposits(context.Background())
|
||||
fd, err := dc.FinalizedDeposits(t.Context())
|
||||
require.NoError(t, err)
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), big.NewInt(16))
|
||||
deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), big.NewInt(16))
|
||||
insertIndex := fd.MerkleTrieIndex() + 1
|
||||
for _, dep := range deps {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
@@ -908,28 +907,28 @@ func TestDepositMap_WorksCorrectly(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
pk0 := bytesutil.PadTo([]byte("pk0"), 48)
|
||||
dep, _ := dc.DepositByPubkey(context.Background(), pk0)
|
||||
dep, _ := dc.DepositByPubkey(t.Context(), pk0)
|
||||
var nilDep *ethpb.Deposit
|
||||
assert.DeepEqual(t, nilDep, dep)
|
||||
|
||||
dep = ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))
|
||||
assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 0, [32]byte{}))
|
||||
|
||||
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
|
||||
dep, _ = dc.DepositByPubkey(t.Context(), pk0)
|
||||
assert.NotEqual(t, nilDep, dep)
|
||||
assert.Equal(t, uint64(1000), dep.Data.Amount)
|
||||
|
||||
dep = ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))
|
||||
assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 1, [32]byte{}))
|
||||
|
||||
// Make sure we have the same deposit returned over here.
|
||||
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
|
||||
dep, _ = dc.DepositByPubkey(t.Context(), pk0)
|
||||
assert.NotEqual(t, nilDep, dep)
|
||||
assert.Equal(t, uint64(1000), dep.Data.Amount)
|
||||
|
||||
// Make sure another key doesn't work.
|
||||
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
|
||||
dep, _ = dc.DepositByPubkey(context.Background(), pk1)
|
||||
dep, _ = dc.DepositByPubkey(t.Context(), pk1)
|
||||
assert.DeepEqual(t, nilDep, dep)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
@@ -13,14 +12,14 @@ var _ PendingDepositsFetcher = (*Cache)(nil)
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := Cache{}
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, 111, 100, [32]byte{})
|
||||
dc.InsertPendingDeposit(t.Context(), ðpb.Deposit{}, 111, 100, [32]byte{})
|
||||
|
||||
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
|
||||
}
|
||||
|
||||
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
dc := Cache{}
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
|
||||
dc.InsertPendingDeposit(t.Context(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
|
||||
|
||||
assert.Equal(t, 0, len(dc.pendingDeposits))
|
||||
}
|
||||
@@ -34,13 +33,13 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
{Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
}
|
||||
|
||||
deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
|
||||
deposits := dc.PendingDeposits(t.Context(), big.NewInt(4))
|
||||
expected := []*ethpb.Deposit{
|
||||
{Proof: [][]byte{[]byte("A")}},
|
||||
{Proof: [][]byte{[]byte("B")}},
|
||||
}
|
||||
assert.DeepSSZEqual(t, expected, deposits)
|
||||
|
||||
all := dc.PendingDeposits(context.Background(), nil)
|
||||
all := dc.PendingDeposits(t.Context(), nil)
|
||||
assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -22,7 +21,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
dc.PrunePendingDeposits(t.Context(), 0)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
@@ -46,7 +45,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
dc.PrunePendingDeposits(t.Context(), 6)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
@@ -65,7 +64,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
+	dc.PrunePendingDeposits(t.Context(), 10)
 	expected = []*ethpb.DepositContainer{
 		{Eth1BlockHeight: 10, Index: 10},
 		{Eth1BlockHeight: 12, Index: 12},
@@ -86,7 +85,7 @@ func TestPruneAllPendingDeposits(t *testing.T) {
 		{Eth1BlockHeight: 12, Index: 12},
 	}
 
-	dc.PruneAllPendingDeposits(context.Background())
+	dc.PruneAllPendingDeposits(t.Context())
 	expected := []*ethpb.DepositContainer{}
 
 	assert.DeepEqual(t, expected, dc.pendingDeposits)
@@ -128,10 +127,10 @@ func TestPruneProofs_Ok(t *testing.T) {
 	}
 
 	for _, ins := range deposits {
-		assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
+		assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
 	}
 
-	require.NoError(t, dc.PruneProofs(context.Background(), 1))
+	require.NoError(t, dc.PruneProofs(t.Context(), 1))
 
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -173,10 +172,10 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
 	}
 
 	for _, ins := range deposits {
-		assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
+		assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
 	}
 
-	require.NoError(t, dc.PruneProofs(context.Background(), 2))
+	require.NoError(t, dc.PruneProofs(t.Context(), 2))
 
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
 }
@@ -217,10 +216,10 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
 	}
 
 	for _, ins := range deposits {
-		assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
+		assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
 	}
 
-	require.NoError(t, dc.PruneProofs(context.Background(), 99))
+	require.NoError(t, dc.PruneProofs(t.Context(), 99))
 
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -264,10 +263,10 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
 	}
 
 	for _, ins := range deposits {
-		assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
+		assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
 	}
 
-	require.NoError(t, dc.PruneProofs(context.Background(), 4))
+	require.NoError(t, dc.PruneProofs(t.Context(), 4))
 
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -311,10 +310,10 @@ func TestPruneAllProofs(t *testing.T) {
 	}
 
 	for _, ins := range deposits {
-		assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
+		assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
 	}
 
-	dc.PruneAllProofs(context.Background())
+	dc.PruneAllProofs(t.Context())
 
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
 	assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
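
The change running through this file and the two test files below is mechanical: every `context.Background()` inside a test body becomes `t.Context()`. A minimal sketch of the pattern, assuming Go 1.24 or newer where `(*testing.T).Context` is available; the returned context is canceled shortly before the test finishes, so work started by the test cannot silently outlive it. The helper below is hypothetical and only illustrates the shape of the change.

package cache

import (
	"context"
	"testing"
)

// useCtx stands in for any cache call that accepts a context; it is a
// hypothetical helper used only in this sketch.
func useCtx(ctx context.Context) error {
	return ctx.Err()
}

func TestUsesTestContext(t *testing.T) {
	_ = context.Background() // before: never canceled, even after the test ends

	ctx := t.Context() // after: canceled automatically when the test completes (Go 1.24+)
	if err := useCtx(ctx); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}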
beacon-chain/cache/registration_test.go (vendored, 5 changes)
@@ -1,7 +1,6 @@
 package cache
 
 import (
-	"context"
 	"testing"
 	"time"
 
@@ -24,7 +23,7 @@ func TestRegistrationCache(t *testing.T) {
 		Timestamp: uint64(time.Now().Unix()),
 		Pubkey: pubkey,
 	}
-	cache.UpdateIndexToRegisteredMap(context.Background(), m)
+	cache.UpdateIndexToRegisteredMap(t.Context(), m)
 	reg, err := cache.RegistrationByIndex(validatorIndex)
 	require.NoError(t, err)
 	require.Equal(t, string(reg.Pubkey), string(pubkey))
@@ -38,7 +37,7 @@ func TestRegistrationCache(t *testing.T) {
 		Timestamp: uint64(time.Now().Unix()),
 		Pubkey: pubkey,
 	}
-	cache.UpdateIndexToRegisteredMap(context.Background(), m)
+	cache.UpdateIndexToRegisteredMap(t.Context(), m)
 	reg, err := cache.RegistrationByIndex(validatorIndex2)
 	require.NoError(t, err)
 	require.Equal(t, string(reg.Pubkey), string(pubkey))
beacon-chain/cache/skip_slot_cache_test.go (vendored, 5 changes)
@@ -1,7 +1,6 @@
 package cache_test
 
 import (
-	"context"
 	"sync"
 	"testing"
 
@@ -14,7 +13,7 @@ import (
 )
 
 func TestSkipSlotCache_RoundTrip(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	c := cache.NewSkipSlotCache()
 
 	r := [32]byte{'a'}
@@ -38,7 +37,7 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
 }
 
 func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	c := cache.NewSkipSlotCache()
 
 	r := [32]byte{'a'}
beacon-chain/cache/sync_committee.go (vendored, 62 changes)
@@ -67,6 +67,30 @@ func (s *SyncCommitteeCache) Clear() {
 	s.cache = cache.NewFIFO(keyFn)
 }
 
+// CurrentPeriodPositions returns current period positions of validator indices with respect with
+// sync committee. If any input validator index has no assignment, an empty list will be returned
+// for that validator. If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
+// Manual checking of state for index position in state is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
+func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	pos, err := s.positionsInCommittee(root, indices)
+	if err != nil {
+		return nil, err
+	}
+	result := make([][]primitives.CommitteeIndex, len(pos))
+	for i, p := range pos {
+		if p == nil {
+			result[i] = []primitives.CommitteeIndex{}
+		} else {
+			result[i] = p.currentPeriod
+		}
+	}
+
+	return result, nil
+}
+
 // CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
 // sync committee. If the input validator index has no assignment, an empty list will be returned.
 // If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
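
A sketch of how a caller might consume the new batched lookup, based only on the behavior documented in the comment above: `ErrNonExistingSyncCommitteeKey` means the root is not cached and the caller should fall back to reading the beacon state, while an empty inner slice means that validator simply has no current-period assignment. The function name and the fromState fallback are hypothetical, not part of this change, and the snippet assumes it lives in the same cache package with an "errors" import.

// currentPositionsWithFallback is a hypothetical caller illustrating the
// documented contract of CurrentPeriodPositions.
func currentPositionsWithFallback(
	c *SyncCommitteeCache,
	root [32]byte,
	indices []primitives.ValidatorIndex,
	fromState func(primitives.ValidatorIndex) []primitives.CommitteeIndex, // hypothetical fallback
) ([][]primitives.CommitteeIndex, error) {
	positions, err := c.CurrentPeriodPositions(root, indices)
	if errors.Is(err, ErrNonExistingSyncCommitteeKey) {
		// Root not cached: recompute positions from the state, as recommended above.
		out := make([][]primitives.CommitteeIndex, len(indices))
		for i, idx := range indices {
			out[i] = fromState(idx)
		}
		return out, nil
	}
	if err != nil {
		return nil, err
	}
	// An empty inner slice means indices[i] has no current-period sync committee assignment.
	return positions, nil
}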
@@ -104,11 +128,7 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
 	return pos.nextPeriod, nil
 }
 
-// Helper function for CurrentPeriodIndexPosition and NextPeriodIndexPosition to return a mapping
-// of validator index to its index(s) position in the sync committee.
-func (s *SyncCommitteeCache) idxPositionInCommittee(
-	root [32]byte, valIdx primitives.ValidatorIndex,
-) (*positionInCommittee, error) {
+func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
 	obj, exists, err := s.cache.GetByKey(key(root))
 	if err != nil {
 		return nil, err
@@ -121,13 +141,33 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
 	if !ok {
 		return nil, errNotSyncCommitteeIndexPosition
 	}
-	idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
-	if !ok {
-		SyncCommitteeCacheMiss.Inc()
+	result := make([]*positionInCommittee, len(indices))
+	for i, idx := range indices {
+		idxInCommittee, ok := item.vIndexToPositionMap[idx]
+		if ok {
+			SyncCommitteeCacheHit.Inc()
+			result[i] = idxInCommittee
+		} else {
+			SyncCommitteeCacheMiss.Inc()
+			result[i] = nil
+		}
+	}
+	return result, nil
+}
+
+// Helper function for CurrentPeriodIndexPosition and NextPeriodIndexPosition to return a mapping
+// of validator index to its index(s) position in the sync committee.
+func (s *SyncCommitteeCache) idxPositionInCommittee(
+	root [32]byte, valIdx primitives.ValidatorIndex,
+) (*positionInCommittee, error) {
+	positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
+	if err != nil {
+		return nil, err
+	}
+	if len(positions) == 0 {
 		return nil, nil
 	}
-	SyncCommitteeCacheHit.Inc()
-	return idxInCommittee, nil
+	return positions[0], nil
 }
 
 // UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
@@ -178,7 +218,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
 	s.lock.Lock()
 	defer s.lock.Unlock()
 	if clearCount != s.cleared.Load() {
-		log.Warn("cache rotated during async committee update operation - abandoning cache update")
+		log.Warn("Cache rotated during async committee update operation - abandoning cache update")
 		return nil
 	}
 
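
The only textual change in the hunk above is the log message casing, but the guard it sits in is worth spelling out: the update snapshots a clear counter before doing its work, and after re-taking the lock it abandons the write if the cache was rotated in the meantime (the log message calls this an async update). Below is a minimal, self-contained sketch of that generation-counter pattern under assumed names (genCache, Update, Clear); it illustrates the idea, not the Prysm code.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// genCache illustrates the clear-counter guard: writers abandon their update
// if Clear ran between snapshotting the counter and committing the result.
type genCache struct {
	mu      sync.Mutex
	cleared atomic.Uint64
	data    map[string]int
}

func (c *genCache) Clear() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data = make(map[string]int)
	c.cleared.Add(1) // bump the generation so in-flight updates notice
}

func (c *genCache) Update(key string, compute func() int) {
	gen := c.cleared.Load() // snapshot before the slow work

	v := compute() // expensive work done without holding the lock

	c.mu.Lock()
	defer c.mu.Unlock()
	if gen != c.cleared.Load() {
		// Cache rotated during the async update; drop the stale result.
		fmt.Println("cache rotated during async update - abandoning update")
		return
	}
	c.data[key] = v
}

func main() {
	c := &genCache{data: map[string]int{}}
	c.Update("a", func() int { return 1 })
	fmt.Println(c.data["a"])
}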
@@ -16,6 +16,11 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
 	return &FakeSyncCommitteeCache{}
 }
 
+// CurrentPeriodPositions -- fake
+func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
+	return nil, nil
+}
+
 // CurrentEpochIndexPosition -- fake.
 func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
 	return nil, nil
Some files were not shown because too many files have changed in this diff.