Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: v4.1.0-alp ... initsync-d (117 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e5a2873376 | |
| | 4d4753ebf3 | |
| | d4fd310615 | |
| | 58cdb29ef3 | |
| | cc2b4db582 | |
| | be9b6ea837 | |
| | 806a394c89 | |
| | 97a99874e8 | |
| | 945b087ca9 | |
| | b57effd096 | |
| | 867db1aeee | |
| | 99843688cd | |
| | a536612c39 | |
| | c5501f8775 | |
| | 55e4c6e1db | |
| | 2806326155 | |
| | d7318ea485 | |
| | e183d1dff4 | |
| | a3868e7fc6 | |
| | af70677778 | |
| | 8eb82dd378 | |
| | 0bd232667b | |
| | 39072e1b74 | |
| | 66011d5d9c | |
| | 419dbd57f7 | |
| | da6ae3c204 | |
| | 44973b0bb3 | |
| | de0c7e6256 | |
| | c1c0cd040c | |
| | 734eb98941 | |
| | ffaef83634 | |
| | f9a40ef111 | |
| | 7454041356 | |
| | f37301c0c0 | |
| | cf1bfb9d67 | |
| | 9d8dd5c9ad | |
| | f812bdcf60 | |
| | 58c0899676 | |
| | 4628c19f51 | |
| | 58f23d2302 | |
| | 9e33723b26 | |
| | 2c32a87d17 | |
| | 56f3dafb54 | |
| | 70380660b3 | |
| | ecd55e5462 | |
| | 46b2442127 | |
| | d8b2d9060f | |
| | b667c68c87 | |
| | fb19ee8895 | |
| | 51b8075474 | |
| | 367504f403 | |
| | b4e72f1e19 | |
| | a7361cd5ab | |
| | 02ee6897bb | |
| | c20f188966 | |
| | 5870536dca | |
| | c8b39e08ef | |
| | b0b4e42436 | |
| | b0caea3fae | |
| | a46370f5bf | |
| | 0919b2245f | |
| | 42c192d97d | |
| | 3394bbe359 | |
| | 70225186ff | |
| | 942d63fcc1 | |
| | 90fb2325db | |
| | 57a63f37f7 | |
| | 9a2c2470c6 | |
| | 723f73795f | |
| | 6454081577 | |
| | 489b34a01f | |
| | e22258caa9 | |
| | 3bc9ac37f6 | |
| | 7247b8bd3c | |
| | e2591f7c5b | |
| | b40729f6c0 | |
| | b7cb5c81be | |
| | e76aedf1ae | |
| | 0e5d299d02 | |
| | 14f040de48 | |
| | 1a1a30591e | |
| | 3070878d59 | |
| | f59307358e | |
| | ef1f5e6dbe | |
| | a22ca3fecb | |
| | 14ce051668 | |
| | 998a493ee2 | |
| | 398f44bb53 | |
| | 4098b3a1d2 | |
| | d8e6d2cb2e | |
| | 6b915bab26 | |
| | dd73f762ec | |
| | 4d120b53ae | |
| | 4d6b3252ae | |
| | 9bb81537c8 | |
| | 0fdf63b565 | |
| | bd85b0e4e1 | |
| | d1562bab53 | |
| | 1e29877406 | |
| | 8c39c55f05 | |
| | 0ccfc74e86 | |
| | 9ecf4d34f5 | |
| | a8793c9f21 | |
| | 79445d2bf6 | |
| | 35fc1c976f | |
| | b0423a94af | |
| | 6c16e90fe9 | |
| | c0a01bb859 | |
| | 2a408a0dd8 | |
| | af16c71d6e | |
| | ec954ec9a6 | |
| | 7781a3186b | |
| | 440cf32966 | |
| | 13c69af717 | |
| | ca88e59ee3 | |
| | 809a67ebcc | |
| | f17898f658 | |
.bazelrc (2 changes)
@@ -15,7 +15,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false --define blst_modern=true
build --define blst_disabled=false
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true

@@ -12,7 +12,8 @@
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
59
.github/ISSUE_TEMPLATE/bug_report.md
vendored
59
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,59 +0,0 @@
|
||||
---
|
||||
name: "\U0001F41EBug report"
|
||||
about: Report a bug or problem with running Prysm
|
||||
---
|
||||
<!--💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎
|
||||
|
||||
Hellooo! 😄
|
||||
|
||||
To help us tend to your issue faster, please search our currently open issues before submitting a new one.
|
||||
Existing issues often contain information about workarounds, resolution, or progress updates.
|
||||
|
||||
💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎-->
|
||||
|
||||
# 🐞 Bug Report
|
||||
|
||||
### Description
|
||||
|
||||
<!-- ✍️--> A clear and concise description of the problem...
|
||||
|
||||
### Has this worked before in a previous version?
|
||||
|
||||
<!-- Did this behavior use to work in the previous version? -->
|
||||
<!-- ✍️--> Yes, the previous version in which this bug was not present was: ....
|
||||
|
||||
## 🔬 Minimal Reproduction
|
||||
|
||||
<!--
|
||||
Please let us know how we can reproduce this issue. Include the exact method you used to run Prysm along with any flags used in your beacon chain and/or validator. Make sure you don't upload any confidential files or private keys.
|
||||
-->
|
||||
|
||||
## 🔥 Error
|
||||
|
||||
<pre><code>
|
||||
<!-- If the issue is accompanied by an error, please share the error logs with us below. If you have a lot of logs, place make a paste bin with your logs and share the link with us here: -->
|
||||
<!-- ✍️-->
|
||||
|
||||
</code></pre>
|
||||
|
||||
|
||||
## 🌍 Your Environment
|
||||
|
||||
**Operating System:**
|
||||
|
||||
<pre>
|
||||
<code>
|
||||
|
||||
</code>
|
||||
</pre>
|
||||
|
||||
**What version of Prysm are you running? (Which release)**
|
||||
|
||||
<pre>
|
||||
<code>
|
||||
|
||||
</code>
|
||||
</pre>
|
||||
|
||||
**Anything else relevant (validator index / public key)?**
|
||||
|
||||
66
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
66
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
name: 🐞 Bug report
|
||||
description: Report a bug or problem with running Prysm
|
||||
labels: ["Bug"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
To help us tend to your issue faster, please search our currently open issues before submitting a new one.
|
||||
Existing issues often contain information about workarounds, resolution, or progress updates.
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
attributes:
|
||||
label: Describe the bug
|
||||
description: |
|
||||
A clear and concise description of the problem...
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: previous-version
|
||||
attributes:
|
||||
label: Has this worked before in a previous version?
|
||||
description: Did this behavior use to work in the previous version?
|
||||
render: text
|
||||
- type: textarea
|
||||
id: reproduction-steps
|
||||
attributes:
|
||||
label: 🔬 Minimal Reproduction
|
||||
description: |
|
||||
Please let us know how we can reproduce this issue.
|
||||
Include the exact method you used to run Prysm along with any flags used in your beacon chain and/or validator.
|
||||
Make sure you don't upload any confidential files or private keys.
|
||||
placeholder: |
|
||||
Steps to reproduce:
|
||||
|
||||
1. Start '...'
|
||||
2. Then '...'
|
||||
3. Check '...'
|
||||
4. See error
|
||||
- type: textarea
|
||||
id: errors
|
||||
attributes:
|
||||
label: Error
|
||||
description: |
|
||||
If the issue is accompanied by an error, please share the error logs with us below.
|
||||
If you have a lot of logs, place make a paste bin with your logs and share the link with us here:
|
||||
render: text
|
||||
- type: dropdown
|
||||
id: platform
|
||||
attributes:
|
||||
label: Platform(s)
|
||||
description: What platform(s) did this occur on?
|
||||
multiple: true
|
||||
options:
|
||||
- Linux (x86)
|
||||
- Linux (ARM)
|
||||
- Mac (Intel)
|
||||
- Mac (Apple Silicon)
|
||||
- Windows (x86)
|
||||
- Windows (ARM)
|
||||
- type: input
|
||||
attributes:
|
||||
label: What version of Prysm are you running? (Which release)
|
||||
description: You can check your Prysm version by running your beacon node or validator with the `--version` flag.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Anything else relevant (validator index / public key)?
|
||||
.github/workflows/go.yml (vendored, 8 changes)
@@ -75,14 +75,14 @@ jobs:
- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
env:
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
env:
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"

# Tests run via Bazel for now...
# - name: Test
95
WORKSPACE
95
WORKSPACE
@@ -50,8 +50,6 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool
|
||||
|
||||
configure_prysm_toolchains()
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "bazel_skylib",
|
||||
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
|
||||
@@ -67,10 +65,10 @@ bazel_skylib_workspace()
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
|
||||
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -87,6 +85,24 @@ http_archive(
|
||||
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "rules_oci",
|
||||
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
|
||||
strip_prefix = "rules_oci-1.3.4",
|
||||
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
|
||||
)
|
||||
|
||||
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
|
||||
|
||||
rules_oci_dependencies()
|
||||
|
||||
load("@rules_oci//oci:repositories.bzl", "LATEST_CRANE_VERSION", "oci_register_toolchains")
|
||||
|
||||
oci_register_toolchains(
|
||||
name = "oci",
|
||||
crane_version = LATEST_CRANE_VERSION,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
patch_args = ["-p1"],
|
||||
@@ -94,10 +110,10 @@ http_archive(
|
||||
# Expose internals of go_test for custom build transitions.
|
||||
"//third_party:io_bazel_rules_go_test.patch",
|
||||
],
|
||||
sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
|
||||
sha256 = "91585017debb61982f7054c9688857a2ad1fd823fc3f9cb05048b0025c47d023",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -167,12 +183,30 @@ container_pull(
|
||||
repository = "pinglamb/alpine-glibc",
|
||||
)
|
||||
|
||||
load("@rules_oci//oci:pull.bzl", "oci_pull")
|
||||
|
||||
# A multi-arch base image
|
||||
oci_pull(
|
||||
name = "linux_debian11_multiarch_base", # Debian bullseye
|
||||
digest = "sha256:9b8e0854865dcaf49470b4ec305df45957020fbcf17b71eeb50ffd3bc5bf885d", # 2023-05-17
|
||||
image = "gcr.io/distroless/cc-debian11",
|
||||
platforms = [
|
||||
"linux/amd64",
|
||||
"linux/arm64",
|
||||
],
|
||||
reproducible = True,
|
||||
)
|
||||
|
||||
load("@prysm//tools:image_deps.bzl", "prysm_image_deps")
|
||||
|
||||
prysm_image_deps()
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
|
||||
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.20.7",
|
||||
go_version = "1.20.9",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
@@ -213,7 +247,9 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.4.0-beta.1"
|
||||
consensus_spec_test_version = "v1.4.0-beta.2-hotfix"
|
||||
|
||||
consensus_spec_version = "v1.4.0-beta.2"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -229,8 +265,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "24399b60ce3fbeb2311952d213dc3731b6dcb0f8881b016c283de5b518d2bbba",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
sha256 = "99770a001189f66204a4ef79161c8002bcbbcbd8236f1c6479bd5b83a3c68d42",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -245,8 +281,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "8e656ee48d2e2ebc9cf9baedb81f27925bc625b3e3fbb2883444a08758a5884a",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
sha256 = "56763f6492ee137108271007d62feef60d8e3f1698e53dee4bc4b07e55f7326b",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -261,8 +297,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "8bd137da6cc57a25383bfac5bc37e31265098145278bd8002b88e24c8b4718b9",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
sha256 = "bc1cac1a991cdc7426efea14385dcf215df85ed3f0572b824ad6a1d7ca0c89ad",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -276,7 +312,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "2bc1edb6e4a4f86c00317c04618a90b0ca29ee1eba833d0a64dd67fdd83fdbe3",
|
||||
sha256 = "c5898001aaab2a5bb38a39ff9d17a52f1f9befcc26e63752cbf556040f0c884e",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
@@ -323,9 +359,9 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "4116c8acb54eb3ca28cc4dc9bc688e08e25da91d70ed1f2622f02d3c33eba922",
|
||||
strip_prefix = "holesky-76057d57ab1f585519ecb606a9e5f7780e925a37",
|
||||
url = "https://github.com/eth-clients/holesky/archive/76057d57ab1f585519ecb606a9e5f7780e925a37.tar.gz", # Aug 27, 2023
|
||||
sha256 = "9f66d8d5644982d3d0d2e3d2b9ebe77a5f96638a5d7fcd715599c32818195cb3",
|
||||
strip_prefix = "holesky-ea39b9006210848e13f28d92e12a30548cecd41d",
|
||||
url = "https://github.com/eth-clients/holesky/archive/ea39b9006210848e13f28d92e12a30548cecd41d.tar.gz", # 2023-09-21
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -337,10 +373,23 @@ http_archive(
|
||||
],
|
||||
)
|
||||
|
||||
# Group the sources of the library so that CMake rule have access to it
|
||||
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
|
||||
|
||||
# External dependencies
|
||||
http_archive(
|
||||
name = "googleapis",
|
||||
sha256 = "9d1a930e767c93c825398b8f8692eca3fe353b9aaadedfbcf1fca2282c85df88",
|
||||
strip_prefix = "googleapis-64926d52febbf298cb82a8f472ade4a3969ba922",
|
||||
urls = [
|
||||
"https://github.com/googleapis/googleapis/archive/64926d52febbf298cb82a8f472ade4a3969ba922.zip",
|
||||
],
|
||||
)
|
||||
|
||||
load("@googleapis//:repository_rules.bzl", "switched_rules_by_language")
|
||||
|
||||
switched_rules_by_language(
|
||||
name = "com_google_googleapis_imports",
|
||||
go = True,
|
||||
)
|
||||
|
||||
load("//:deps.bzl", "prysm_deps")
|
||||
|
||||
# gazelle:repository_macro deps.bzl%prysm_deps
|
||||
|
||||
@@ -13,6 +13,8 @@ go_library(
|
||||
"//api/client:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/eth/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
"text/template"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
|
||||
@@ -148,14 +150,14 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
|
||||
}
|
||||
fr := &forkResponse{}
|
||||
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
|
||||
fr := &shared.Fork{}
|
||||
dataWrapper := &struct{ Data *shared.Fork }{Data: fr}
|
||||
err = json.Unmarshal(body, dataWrapper)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error decoding json response in GetFork")
|
||||
}
|
||||
|
||||
return fr.Fork()
|
||||
return fr.ToConsensus()
|
||||
}
|
||||
|
||||
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
|
||||
@@ -283,7 +285,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
|
||||
|
||||
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
|
||||
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
|
||||
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
|
||||
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
|
||||
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
|
||||
body, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
@@ -322,12 +324,12 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
|
||||
|
||||
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
|
||||
// Returns a struct representation of json response.
|
||||
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
|
||||
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
|
||||
body, err := c.Get(ctx, changeBLStoExecutionPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
|
||||
poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
|
||||
err = json.Unmarshal(body, poolResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -335,40 +337,8 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
|
||||
return poolResponse, nil
|
||||
}
|
||||
|
||||
type forkResponse struct {
|
||||
PreviousVersion string `json:"previous_version"`
|
||||
CurrentVersion string `json:"current_version"`
|
||||
Epoch string `json:"epoch"`
|
||||
}
|
||||
|
||||
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
|
||||
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cSlice, err := hexutil.Decode(f.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
|
||||
}
|
||||
pSlice, err := hexutil.Decode(f.PreviousVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
|
||||
}
|
||||
return ðpb.Fork{
|
||||
CurrentVersion: cSlice,
|
||||
PreviousVersion: pSlice,
|
||||
Epoch: primitives.Epoch(epoch),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type forkScheduleResponse struct {
|
||||
Data []forkResponse
|
||||
Data []shared.Fork
|
||||
}
|
||||
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -41,6 +42,7 @@ go_test(
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -52,7 +54,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -13,9 +13,11 @@ import (
|
||||
"text/template"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/authorization"
|
||||
@@ -268,9 +270,13 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
vs := make([]*SignedValidatorRegistration, len(svr))
|
||||
vs := make([]*shared.SignedValidatorRegistration, len(svr))
|
||||
for i := 0; i < len(svr); i++ {
|
||||
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
|
||||
svrJson, err := shared.SignedValidatorRegistrationFromConsensus(svr[i])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("failed to encode to SignedValidatorRegistration at index %d", i))
|
||||
}
|
||||
vs[i] = svrJson
|
||||
}
|
||||
body, err := json.Marshal(vs)
|
||||
if err != nil {
|
||||
@@ -295,12 +301,14 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
|
||||
b, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(ðpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
|
||||
}
|
||||
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
|
||||
}
|
||||
@@ -330,12 +338,14 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
|
||||
b, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(ðpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
|
||||
}
|
||||
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
|
||||
}
|
||||
@@ -365,8 +375,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
|
||||
b := ðpb.SignedBlindedBeaconBlockAndBlobsDeneb{Block: psb, Blobs: blobs}
|
||||
b, err := shared.SignedBlindedBeaconBlockContentsDenebFromConsensus(ðpb.SignedBlindedBeaconBlockAndBlobsDeneb{SignedBlindedBlock: psb, SignedBlindedBlobSidecars: blobs})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockContentsDeneb to json marshalable type")
|
||||
}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
@@ -23,6 +24,7 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type roundtrip func(*http.Request) (*http.Response, error)
|
||||
@@ -397,10 +399,18 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
|
||||
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
|
||||
var req shared.SignedBlindedBeaconBlockContentsDeneb
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
require.NoError(t, err)
|
||||
block, err := req.SignedBlindedBlock.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, block, test.SignedBlindedBlock)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
|
||||
@@ -412,11 +422,11 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test.Block)
|
||||
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test.SignedBlindedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.Blobs)
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.SignedBlindedBlobSidecars)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
@@ -744,9 +754,13 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
}
|
||||
|
||||
func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
|
||||
basebytes, err := shared.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
return ð.SignedBlindedBeaconBlockAndBlobsDeneb{
|
||||
Block: ð.SignedBlindedBeaconBlockDeneb{
|
||||
Block: ð.BlindedBeaconBlockDeneb{
|
||||
SignedBlindedBlock: ð.SignedBlindedBeaconBlockDeneb{
|
||||
Message: ð.BlindedBeaconBlockDeneb{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -758,7 +772,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
|
||||
Graffiti: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
@@ -861,8 +875,8 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 48),
|
||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
SyncCommitteeBits: bitfield.Bitvector512(ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458")),
|
||||
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -876,7 +890,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
|
||||
BaseFeePerGas: basebytes,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -887,7 +901,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Blobs: []*eth.SignedBlindedBlobSidecar{
|
||||
SignedBlindedBlobSidecars: []*eth.SignedBlindedBlobSidecar{
|
||||
{
|
||||
Message: ð.BlindedBlobSidecar{
|
||||
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -16,87 +16,6 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SignedValidatorRegistration a struct for signed validator registrations.
|
||||
type SignedValidatorRegistration struct {
|
||||
*eth.SignedValidatorRegistrationV1
|
||||
}
|
||||
|
||||
// ValidatorRegistration a struct for validator registrations.
|
||||
type ValidatorRegistration struct {
|
||||
*eth.ValidatorRegistrationV1
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json representation copy of signed validator registration.
|
||||
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *ValidatorRegistration `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{
|
||||
Message: &ValidatorRegistration{r.Message},
|
||||
Signature: r.SignedValidatorRegistrationV1.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON returns a byte representation of signed validator registration from json.
|
||||
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
if r.SignedValidatorRegistrationV1 == nil {
|
||||
r.SignedValidatorRegistrationV1 = ð.SignedValidatorRegistrationV1{}
|
||||
}
|
||||
o := struct {
|
||||
Message *ValidatorRegistration `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{}
|
||||
if err := json.Unmarshal(b, &o); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Message = o.Message.ValidatorRegistrationV1
|
||||
r.Signature = o.Signature
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json representation copy of validator registration.
|
||||
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}{
|
||||
FeeRecipient: r.FeeRecipient,
|
||||
GasLimit: fmt.Sprintf("%d", r.GasLimit),
|
||||
Timestamp: fmt.Sprintf("%d", r.Timestamp),
|
||||
Pubkey: r.Pubkey,
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON returns a byte representation of validator registration from json.
|
||||
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
if r.ValidatorRegistrationV1 == nil {
|
||||
r.ValidatorRegistrationV1 = ð.ValidatorRegistrationV1{}
|
||||
}
|
||||
o := struct {
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}{}
|
||||
if err := json.Unmarshal(b, &o); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.FeeRecipient = o.FeeRecipient
|
||||
r.Pubkey = o.Pubkey
|
||||
var err error
|
||||
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
|
||||
return errors.Wrap(err, "failed to parse gas limit")
|
||||
}
|
||||
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
|
||||
return errors.Wrap(err, "failed to parse timestamp")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var errInvalidUint256 = errors.New("invalid Uint256")
|
||||
var errDecodeUint256 = errors.New("unable to decode into Uint256")
|
||||
|
||||
@@ -636,44 +555,6 @@ type SignedBlindedBeaconBlockBellatrix struct {
|
||||
*eth.SignedBlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBellatrix is a field in SignedBlindedBeaconBlockBellatrix.
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBodyBellatrix is a field in BlindedBeaconBlockBellatrix.
|
||||
type BlindedBeaconBlockBodyBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBodyBellatrix
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockBellatrix.
|
||||
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockBellatrix `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{
|
||||
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
|
||||
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBellatrix.
|
||||
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||
StateRoot hexutil.Bytes `json:"state_root"`
|
||||
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: b.ParentRoot,
|
||||
StateRoot: b.StateRoot,
|
||||
Body: &BlindedBeaconBlockBodyBellatrix{b.BlindedBeaconBlockBellatrix.Body},
|
||||
})
|
||||
}
|
||||
|
||||
// ProposerSlashing is a field in BlindedBeaconBlockBodyCapella.
|
||||
type ProposerSlashing struct {
|
||||
*eth.ProposerSlashing
|
||||
@@ -928,53 +809,6 @@ func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyBellatrix.
|
||||
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
|
||||
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits[i]}
|
||||
}
|
||||
deps := make([]*Deposit, len(b.BlindedBeaconBlockBodyBellatrix.Deposits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Deposits {
|
||||
deps[i] = &Deposit{Deposit: b.BlindedBeaconBlockBodyBellatrix.Deposits[i]}
|
||||
}
|
||||
atts := make([]*Attestation, len(b.BlindedBeaconBlockBodyBellatrix.Attestations))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Attestations {
|
||||
atts[i] = &Attestation{Attestation: b.BlindedBeaconBlockBodyBellatrix.Attestations[i]}
|
||||
}
|
||||
atsl := make([]*AttesterSlashing, len(b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings {
|
||||
atsl[i] = &AttesterSlashing{AttesterSlashing: b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings[i]}
|
||||
}
|
||||
pros := make([]*ProposerSlashing, len(b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings {
|
||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti hexutil.Bytes `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
|
||||
Attestations []*Attestation `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
|
||||
}{
|
||||
RandaoReveal: b.RandaoReveal,
|
||||
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
|
||||
Graffiti: b.BlindedBeaconBlockBodyBellatrix.Graffiti,
|
||||
ProposerSlashings: pros,
|
||||
AttesterSlashings: atsl,
|
||||
Attestations: atts,
|
||||
Deposits: deps,
|
||||
VoluntaryExits: sve,
|
||||
SyncAggregate: &SyncAggregate{b.BlindedBeaconBlockBodyBellatrix.SyncAggregate},
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBLSToExecutionChange is a field in Beacon Block Body for capella and above.
|
||||
type SignedBLSToExecutionChange struct {
|
||||
*eth.SignedBLSToExecutionChange
|
||||
@@ -1009,102 +843,6 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBlindedBeaconBlockCapella is part of the request object sent to builder API /eth/v1/builder/blinded_blocks for Capella.
|
||||
type SignedBlindedBeaconBlockCapella struct {
|
||||
*eth.SignedBlindedBeaconBlockCapella
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockCapella is a field in SignedBlindedBeaconBlockCapella.
|
||||
type BlindedBeaconBlockCapella struct {
|
||||
*eth.BlindedBeaconBlockCapella
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBodyCapella is a field in BlindedBeaconBlockCapella.
|
||||
type BlindedBeaconBlockBodyCapella struct {
|
||||
*eth.BlindedBeaconBlockBodyCapella
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockCapella.
|
||||
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockCapella `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{
|
||||
Message: &BlindedBeaconBlockCapella{b.Block},
|
||||
Signature: b.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockCapella
|
||||
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||
StateRoot hexutil.Bytes `json:"state_root"`
|
||||
Body *BlindedBeaconBlockBodyCapella `json:"body"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: b.ParentRoot,
|
||||
StateRoot: b.StateRoot,
|
||||
Body: &BlindedBeaconBlockBodyCapella{b.Body},
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyCapella
|
||||
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
|
||||
for i := range b.VoluntaryExits {
|
||||
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.VoluntaryExits[i]}
|
||||
}
|
||||
deps := make([]*Deposit, len(b.Deposits))
|
||||
for i := range b.Deposits {
|
||||
deps[i] = &Deposit{Deposit: b.Deposits[i]}
|
||||
}
|
||||
atts := make([]*Attestation, len(b.Attestations))
|
||||
for i := range b.Attestations {
|
||||
atts[i] = &Attestation{Attestation: b.Attestations[i]}
|
||||
}
|
||||
atsl := make([]*AttesterSlashing, len(b.AttesterSlashings))
|
||||
for i := range b.AttesterSlashings {
|
||||
atsl[i] = &AttesterSlashing{AttesterSlashing: b.AttesterSlashings[i]}
|
||||
}
|
||||
pros := make([]*ProposerSlashing, len(b.ProposerSlashings))
|
||||
for i := range b.ProposerSlashings {
|
||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.ProposerSlashings[i]}
|
||||
}
|
||||
chs := make([]*SignedBLSToExecutionChange, len(b.BlsToExecutionChanges))
|
||||
for i := range b.BlsToExecutionChanges {
|
||||
chs[i] = &SignedBLSToExecutionChange{SignedBLSToExecutionChange: b.BlsToExecutionChanges[i]}
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti hexutil.Bytes `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
|
||||
Attestations []*Attestation `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderCapella `json:"execution_payload_header"`
|
||||
}{
|
||||
RandaoReveal: b.RandaoReveal,
|
||||
Eth1Data: &Eth1Data{b.Eth1Data},
|
||||
Graffiti: b.Graffiti,
|
||||
ProposerSlashings: pros,
|
||||
AttesterSlashings: atsl,
|
||||
Attestations: atts,
|
||||
Deposits: deps,
|
||||
VoluntaryExits: sve,
|
||||
BLSToExecutionChanges: chs,
|
||||
SyncAggregate: &SyncAggregate{b.SyncAggregate},
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: b.ExecutionPayloadHeader},
|
||||
})
|
||||
}
|
||||
|
||||
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
|
||||
type ExecHeaderResponseDeneb struct {
|
||||
Data struct {
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
@@ -38,7 +38,8 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
|
||||
a, err := shared.SignedValidatorRegistrationFromConsensus(svr)
|
||||
require.NoError(t, err)
|
||||
je, err := json.Marshal(a)
|
||||
require.NoError(t, err)
|
||||
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
|
||||
@@ -55,11 +56,11 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
|
||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
|
||||
|
||||
t.Run("roundtrip", func(t *testing.T) {
|
||||
b := &SignedValidatorRegistration{}
|
||||
b := &shared.SignedValidatorRegistration{}
|
||||
if err := json.Unmarshal(je, b); err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
|
||||
require.DeepEqual(t, a, b)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1529,7 +1530,7 @@ func TestUint256UnmarshalTooBig(t *testing.T) {
|
||||
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
expected, err := os.ReadFile("testdata/blinded-block.json")
|
||||
require.NoError(t, err)
|
||||
b := &BlindedBeaconBlockBellatrix{BlindedBeaconBlockBellatrix: ð.BlindedBeaconBlockBellatrix{
|
||||
b, err := shared.BlindedBeaconBlockBellatrixFromConsensus(ð.BlindedBeaconBlockBellatrix{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -1546,7 +1547,8 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
SyncAggregate: pbSyncAggregate(),
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
|
||||
},
|
||||
}}
|
||||
})
|
||||
require.NoError(t, err)
|
||||
m, err := json.Marshal(b)
|
||||
require.NoError(t, err)
|
||||
// string error output is easier to deal with
|
||||
@@ -1558,7 +1560,7 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
|
||||
expected, err := os.ReadFile("testdata/blinded-block-capella.json")
|
||||
require.NoError(t, err)
|
||||
b := &BlindedBeaconBlockCapella{BlindedBeaconBlockCapella: ð.BlindedBeaconBlockCapella{
|
||||
b, err := shared.BlindedBeaconBlockCapellaFromConsensus(ð.BlindedBeaconBlockCapella{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -1575,7 +1577,8 @@ func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
|
||||
SyncAggregate: pbSyncAggregate(),
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeaderCapella(t),
|
||||
},
|
||||
}}
|
||||
})
|
||||
require.NoError(t, err)
|
||||
m, err := json.Marshal(b)
|
||||
require.NoError(t, err)
|
||||
// string error output is easier to deal with
|
||||
|
||||
@@ -4,6 +4,7 @@ const (
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
)
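These constants name the HTTP headers and media types that the API clients in this changeset attach to requests. A minimal, self-contained sketch of how such a header is applied with the standard library (the endpoint path and fork name below are placeholders for illustration, not taken from this diff):

```go
package main

import (
	"fmt"
	"net/http"
)

// Header and media-type names taken from the constants above.
const (
	versionHeader = "Eth-Consensus-Version"
	jsonMediaType = "application/json"
)

func main() {
	// Hypothetical endpoint, for illustration only.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:3500/eth/v1/builder/blinded_blocks", nil)
	if err != nil {
		panic(err)
	}
	// Tag the payload with its consensus fork version and content type,
	// mirroring what the builder client does for Bellatrix/Capella/Deneb blocks.
	req.Header.Set(versionHeader, "deneb")
	req.Header.Set("Content-Type", jsonMediaType)
	fmt.Println(req.Header)
}
```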
@@ -15,6 +15,9 @@ func StartAndEndPage(pageToken string, pageSize, totalSize int) (int, int, strin
if pageToken == "" {
pageToken = "0"
}
if pageSize < 0 || totalSize < 0 {
return 0, 0, "", errors.Errorf("invalid page and total sizes provided: page size %d , total size %d", pageSize, totalSize)
}
if pageSize == 0 {
pageSize = params.BeaconConfig().DefaultPageSize
}
@@ -23,6 +26,9 @@
if err != nil {
return 0, 0, "", errors.Wrap(err, "could not convert page token")
}
if token < 0 {
return 0, 0, "", errors.Errorf("invalid token value provided: %d", token)
}

// Start page can not be greater than set size.
start := token * pageSize

@@ -85,3 +85,19 @@ func TestStartAndEndPage_ExceedsMaxPage(t *testing.T) {
_, _, _, err := pagination.StartAndEndPage("", 0, 0)
assert.ErrorContains(t, wanted, err)
}

func TestStartAndEndPage_InvalidPageValues(t *testing.T) {
_, _, _, err := pagination.StartAndEndPage("10", -1, 10)
assert.ErrorContains(t, "invalid page and total sizes provided", err)

_, _, _, err = pagination.StartAndEndPage("12", 10, -10)
assert.ErrorContains(t, "invalid page and total sizes provided", err)

_, _, _, err = pagination.StartAndEndPage("12", -50, -60)
assert.ErrorContains(t, "invalid page and total sizes provided", err)
}

func TestStartAndEndPage_InvalidTokenValue(t *testing.T) {
_, _, _, err := pagination.StartAndEndPage("-12", 50, 60)
assert.ErrorContains(t, "invalid token value provided", err)
}
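The new guards reject negative page sizes, total sizes, and page tokens before any offsets are computed. The sketch below is a self-contained illustration of that validate-then-slice flow; it is not the Prysm pagination package itself, and the default page size and next-token behavior are assumptions made for the example:

```go
package main

import (
	"fmt"
	"strconv"
)

// Assumed default; Prysm reads this from params.BeaconConfig().DefaultPageSize.
const defaultPageSize = 250

// startAndEndPage mirrors the validation added in the diff: negative sizes and
// negative tokens are rejected before start/end offsets are computed.
func startAndEndPage(pageToken string, pageSize, totalSize int) (int, int, string, error) {
	if pageToken == "" {
		pageToken = "0"
	}
	if pageSize < 0 || totalSize < 0 {
		return 0, 0, "", fmt.Errorf("invalid page and total sizes provided: page size %d, total size %d", pageSize, totalSize)
	}
	if pageSize == 0 {
		pageSize = defaultPageSize
	}
	token, err := strconv.Atoi(pageToken)
	if err != nil {
		return 0, 0, "", fmt.Errorf("could not convert page token: %w", err)
	}
	if token < 0 {
		return 0, 0, "", fmt.Errorf("invalid token value provided: %d", token)
	}
	// Start page can not be greater than the set size.
	start := token * pageSize
	if start >= totalSize {
		return 0, 0, "", fmt.Errorf("page start %d >= list size %d", start, totalSize)
	}
	end := start + pageSize
	if end > totalSize {
		end = totalSize
	}
	return start, end, strconv.Itoa(token + 1), nil
}

func main() {
	start, end, next, err := startAndEndPage("1", 10, 25)
	fmt.Println(start, end, next, err) // 10 20 2 <nil>
}
```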
@@ -5,12 +5,14 @@ go_library(
srcs = [
"chain_info.go",
"chain_info_forkchoice.go",
"currently_syncing_block.go",
"error.go",
"execution_engine.go",
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"init_sync_process_block.go",
"lightclient.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
@@ -47,6 +49,7 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",
@@ -76,6 +79,8 @@ go_library(
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",

@@ -525,3 +525,8 @@ func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (
}
return nil, errBlockDoesNotExist
}

// BlockBeingSynced returns whether the block with the given root is currently being synced
func (s *Service) BlockBeingSynced(root [32]byte) bool {
return s.blockBeingSynced.isSyncing(root)
}
beacon-chain/blockchain/currently_syncing_block.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package blockchain

import "sync"

type currentlySyncingBlock struct {
sync.Mutex
roots map[[32]byte]struct{}
}

func (b *currentlySyncingBlock) set(root [32]byte) {
b.Lock()
defer b.Unlock()
b.roots[root] = struct{}{}
}

func (b *currentlySyncingBlock) unset(root [32]byte) {
b.Lock()
defer b.Unlock()
delete(b.roots, root)
}

func (b *currentlySyncingBlock) isSyncing(root [32]byte) bool {
b.Lock()
defer b.Unlock()
_, ok := b.roots[root]
return ok
}
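A plausible usage sketch for a tracker like this, assuming the usual set / defer-unset pattern around block processing. The processBlock wrapper and the map initialization site are hypothetical; the diff only shows that Prysm exposes the lookup through Service.BlockBeingSynced:

```go
package main

import (
	"fmt"
	"sync"
)

// Same shape as the currentlySyncingBlock tracker above.
type currentlySyncingBlock struct {
	sync.Mutex
	roots map[[32]byte]struct{}
}

func (b *currentlySyncingBlock) set(root [32]byte)   { b.Lock(); defer b.Unlock(); b.roots[root] = struct{}{} }
func (b *currentlySyncingBlock) unset(root [32]byte) { b.Lock(); defer b.Unlock(); delete(b.roots, root) }
func (b *currentlySyncingBlock) isSyncing(root [32]byte) bool {
	b.Lock()
	defer b.Unlock()
	_, ok := b.roots[root]
	return ok
}

// processBlock is a hypothetical caller: mark the root before doing work,
// and clear it on every exit path via defer.
func processBlock(tracker *currentlySyncingBlock, root [32]byte) {
	tracker.set(root)
	defer tracker.unset(root)
	// ... block verification / state transition would happen here ...
}

func main() {
	// The map must be initialized before use; the new file above does not show
	// where Prysm does this, so it is done explicitly here.
	tracker := &currentlySyncingBlock{roots: make(map[[32]byte]struct{})}
	root := [32]byte{'a'}
	fmt.Println(tracker.isSyncing(root)) // false
	processBlock(tracker, root)
	fmt.Println(tracker.isSyncing(root)) // false again once processing returns
}
```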
@@ -70,11 +70,8 @@ func IsInvalidBlock(e error) bool {
if e == nil {
return false
}
_, ok := e.(invalidBlockError)
if !ok {
return IsInvalidBlock(errors.Unwrap(e))
}
return true
var d invalidBlockError
return errors.As(e, &d)
}

// InvalidBlockLVH returns the invalid block last valid hash root. If the error
@@ -83,7 +80,8 @@ func InvalidBlockLVH(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
var d invalidBlockError
ok := errors.As(e, &d)
if !ok {
return [32]byte{}
}
@@ -96,7 +94,8 @@ func InvalidBlockRoot(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
var d invalidBlockError
ok := errors.As(e, &d)
if !ok {
return [32]byte{}
}
@@ -108,7 +107,8 @@ func InvalidAncestorRoots(e error) [][32]byte {
if e == nil {
return [][32]byte{}
}
d, ok := e.(invalidBlockError)
var d invalidBlockError
ok := errors.As(e, &d)
if !ok {
return [][32]byte{}
}

@@ -24,6 +24,9 @@ func TestInvalidBlockRoot(t *testing.T) {
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))

newErr := errors.Wrap(err, "wrap me")
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
}

func TestInvalidRoots(t *testing.T) {
@@ -33,4 +36,9 @@ func TestInvalidRoots(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, roots, InvalidAncestorRoots(err))

newErr := errors.Wrap(err, "wrap me")
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
require.DeepEqual(t, roots, InvalidAncestorRoots(newErr))
}
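The hunks above swap manual type assertions plus errors.Unwrap recursion for errors.As, which walks the wrap chain on its own; that is why the updated tests wrap the error and still expect the same results. A minimal standalone illustration of the difference (the error type here is invented for the example, not Prysm's invalidBlockError):

```go
package main

import (
	"errors"
	"fmt"
)

// badBlockError stands in for a sentinel error type such as invalidBlockError.
type badBlockError struct{ root [32]byte }

func (e badBlockError) Error() string { return "bad block" }

func main() {
	base := badBlockError{root: [32]byte{'a'}}
	wrapped := fmt.Errorf("process block: %w", base) // analogous to errors.Wrap(err, "wrap me")

	// A plain type assertion only sees the outermost error and fails here.
	_, ok := wrapped.(badBlockError)
	fmt.Println("type assertion:", ok) // false

	// errors.As unwraps the chain until it finds a badBlockError.
	var d badBlockError
	fmt.Println("errors.As:", errors.As(wrapped, &d), d.root[0] == 'a') // true true
}
```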
@@ -405,8 +405,13 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([

versionedHashes := make([]common.Hash, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = sha256.Sum256(commitment)
versionedHashes[i][0] = blobCommitmentVersionKZG
versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
}
return versionedHashes, nil
}

func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
versionedHash := sha256.Sum256(commitment)
versionedHash[0] = blobCommitmentVersionKZG
return versionedHash
}
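This refactor extracts the versioned-hash computation into ConvertKzgCommitmentToVersionedHash: the SHA-256 of the KZG commitment with its first byte overwritten by the KZG version byte (0x01 per EIP-4844). A standalone sketch of the same computation without go-ethereum's common.Hash type:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// blobCommitmentVersionKZG corresponds to VERSIONED_HASH_VERSION_KZG from EIP-4844.
const blobCommitmentVersionKZG = byte(0x01)

// convertKzgCommitmentToVersionedHash mirrors the helper extracted in the diff:
// hash the 48-byte commitment, then stamp the version byte into position 0.
func convertKzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	versionedHash := sha256.Sum256(commitment)
	versionedHash[0] = blobCommitmentVersionKZG
	return versionedHash
}

func main() {
	commitment := make([]byte, 48) // zeroed commitment, for illustration only
	vh := convertKzgCommitmentToVersionedHash(commitment)
	fmt.Println(hex.EncodeToString(vh[:])) // starts with "01"
}
```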
beacon-chain/blockchain/lightclient.go (new file, 246 lines)
@@ -0,0 +1,246 @@
package blockchain

import (
"bytes"
"context"
"fmt"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)

const (
finalityBranchNumOfLeaves = 6
)

// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
//
// return LightClientFinalityUpdate(
// attested_header=update.attested_header,
// finalized_header=update.finalized_header,
// finality_branch=update.finality_branch,
// sync_aggregate=update.sync_aggregate,
// signature_slot=update.signature_slot,
// )
func CreateLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
return &ethpbv2.LightClientFinalityUpdate{
AttestedHeader: update.AttestedHeader,
FinalizedHeader: update.FinalizedHeader,
FinalityBranch: update.FinalityBranch,
SyncAggregate: update.SyncAggregate,
SignatureSlot: update.SignatureSlot,
}
}

// CreateLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
//
// return LightClientOptimisticUpdate(
// attested_header=update.attested_header,
// sync_aggregate=update.sync_aggregate,
// signature_slot=update.signature_slot,
// )
func CreateLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
return &ethpbv2.LightClientOptimisticUpdate{
AttestedHeader: update.AttestedHeader,
SyncAggregate: update.SyncAggregate,
SignatureSlot: update.SignatureSlot,
}
}

func NewLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
attestedEpoch := slots.ToEpoch(attestedState.Slot())
if attestedEpoch < types.Epoch(params.BeaconConfig().AltairForkEpoch) {
return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
}

// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
syncAggregate, err := block.Block().Body().SyncAggregate()
if err != nil {
return nil, fmt.Errorf("could not get sync aggregate %v", err)
}

if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
}

// assert state.slot == state.latest_block_header.slot
if state.Slot() != state.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), state.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// assert hash_tree_root(header) == hash_tree_root(block.message)
|
||||
header := state.LatestBlockHeader()
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get state root %v", err)
|
||||
}
|
||||
header.StateRoot = stateRoot[:]
|
||||
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get header root %v", err)
|
||||
}
|
||||
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block root %v", err)
|
||||
}
|
||||
|
||||
if headerRoot != blockRoot {
|
||||
return nil, fmt.Errorf("header root %#x not equal to block root %#x", headerRoot, blockRoot)
|
||||
}
|
||||
|
||||
// assert attested_state.slot == attested_state.latest_block_header.slot
|
||||
if attestedState.Slot() != attestedState.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("attested state slot %d not equal to attested latest block header slot %d", attestedState.Slot(), attestedState.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// attested_header = attested_state.latest_block_header.copy()
|
||||
attestedHeader := attestedState.LatestBlockHeader()
|
||||
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested state root %v", err)
|
||||
}
|
||||
attestedHeader.StateRoot = attestedStateRoot[:]
|
||||
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested header root %v", err)
|
||||
}
|
||||
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
|
||||
}
|
||||
|
||||
// Return result
|
||||
attestedHeaderResult := ðpbv1.BeaconBlockHeader{
|
||||
Slot: attestedHeader.Slot,
|
||||
ProposerIndex: attestedHeader.ProposerIndex,
|
||||
ParentRoot: attestedHeader.ParentRoot,
|
||||
StateRoot: attestedHeader.StateRoot,
|
||||
BodyRoot: attestedHeader.BodyRoot,
|
||||
}
|
||||
|
||||
syncAggregateResult := ðpbv1.SyncAggregate{
|
||||
SyncCommitteeBits: syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: syncAggregate.SyncCommitteeSignature,
|
||||
}
|
||||
|
||||
result := ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: attestedHeaderResult,
|
||||
SyncAggregate: syncAggregateResult,
|
||||
SignatureSlot: block.Block().Slot(),
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientFinalityUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
|
||||
result, err := NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx,
|
||||
state,
|
||||
block,
|
||||
attestedState,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Indicate finality whenever possible
|
||||
var finalizedHeader *ethpbv1.BeaconBlockHeader
|
||||
var finalityBranch [][]byte
|
||||
|
||||
if finalizedBlock != nil && !finalizedBlock.IsNil() {
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
tempFinalizedHeader, err := finalizedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header %v", err)
|
||||
}
|
||||
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
|
||||
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header root %v", err)
|
||||
}
|
||||
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
return nil, fmt.Errorf("finalized header root %#x not equal to attested finalized checkpoint root %#x", finalizedHeaderRoot, bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root))
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
|
||||
return nil, fmt.Errorf("invalid finalized header root %v", attestedState.FinalizedCheckpoint().Root)
|
||||
}
|
||||
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
var bErr error
|
||||
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
|
||||
if bErr != nil {
|
||||
return nil, fmt.Errorf("could not get finalized root proof %v", bErr)
|
||||
}
|
||||
} else {
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
|
||||
finalityBranch = make([][]byte, finalityBranchNumOfLeaves)
|
||||
for i := 0; i < finalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch[i] = make([]byte, 32)
|
||||
}
|
||||
}
|
||||
|
||||
result.FinalizedHeader = finalizedHeader
|
||||
result.FinalityBranch = finalityBranch
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromFinalityUpdate(update *ethpbv2.LightClientFinalityUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromOptimisticUpdate(update *ethpbv2.LightClientOptimisticUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
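Before the accompanying tests, a sketch of how a caller might chain the helpers in this new file; the wrapper function and its wiring are illustrative assumptions, not code from the changeset.

// Illustrative wrapper (same package, not in the diff): derive both broadcastable
// light-client messages for a newly imported head block in one call.
func buildLightClientMessages(
	ctx context.Context,
	st state.BeaconState,
	blk interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState,
	finalizedBlk interfaces.ReadOnlySignedBeaconBlock,
) (*ethpbv2.LightClientFinalityUpdate, *ethpbv2.LightClientOptimisticUpdate, error) {
	update, err := NewLightClientFinalityUpdateFromBeaconState(ctx, st, blk, attestedState, finalizedBlk)
	if err != nil {
		return nil, nil, err
	}
	return CreateLightClientFinalityUpdate(update), CreateLightClientOptimisticUpdate(update), nil
}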
beacon-chain/blockchain/lightclient_test.go (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
type testlc struct {
|
||||
t *testing.T
|
||||
ctx context.Context
|
||||
state state.BeaconState
|
||||
block interfaces.ReadOnlySignedBeaconBlock
|
||||
attestedState state.BeaconState
|
||||
attestedHeader *ethpb.BeaconBlockHeader
|
||||
}
|
||||
|
||||
func newTestLc(t *testing.T) *testlc {
|
||||
return &testlc{t: t}
|
||||
}
|
||||
|
||||
func (l *testlc) setupTest() *testlc {
|
||||
ctx := context.Background()
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateCapella()
|
||||
require.NoError(l.t, err)
|
||||
err = attestedState.SetSlot(slot)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parent := util.NewBeaconBlockCapella()
|
||||
parent.Block.Slot = slot
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(l.t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(l.t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
state, err := util.NewBeaconStateCapella()
|
||||
require.NoError(l.t, err)
|
||||
err = state.SetSlot(slot)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(l.t, err)
|
||||
|
||||
block := util.NewBeaconBlockCapella()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(l.t, err)
|
||||
|
||||
err = state.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(l.t, err)
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
l.state = state
|
||||
l.attestedState = attestedState
|
||||
l.attestedHeader = attestedHeader
|
||||
l.block = signedBlock
|
||||
l.ctx = ctx
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
|
||||
require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
|
||||
require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
|
||||
require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
|
||||
require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")
|
||||
|
||||
attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
|
||||
require.NoError(l.t, err)
|
||||
require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
|
||||
}
|
||||
|
||||
func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
|
||||
syncAggregate, err := l.block.Block().Body().SyncAggregate()
|
||||
require.NoError(l.t, err)
|
||||
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
|
||||
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
|
||||
}
|
||||
|
||||
func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
|
||||
l := newTestLc(t).setupTest()
|
||||
|
||||
update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
|
||||
|
||||
l.checkSyncAggregate(update)
|
||||
l.checkAttestedHeader(update)
|
||||
|
||||
require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
|
||||
require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
|
||||
}
|
||||
|
||||
func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
|
||||
l := newTestLc(t).setupTest()
|
||||
|
||||
update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
|
||||
|
||||
l.checkSyncAggregate(update)
|
||||
l.checkAttestedHeader(update)
|
||||
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
|
||||
require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
|
||||
require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
|
||||
require.Equal(t, finalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
|
||||
for _, leaf := range update.FinalityBranch {
|
||||
require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
|
||||
}
|
||||
}
|
||||
@@ -66,7 +66,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("blobCommitmentCount", len(kzgs))
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
@@ -147,3 +147,15 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlobSidecar(scs []*ethpb.BlobSidecar, startTime time.Time) {
|
||||
if len(scs) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"count": len(scs),
|
||||
"slot": scs[0].Slot,
|
||||
"block": hex.EncodeToString(scs[0].BlockRoot),
|
||||
"validationTime": time.Since(startTime),
|
||||
}).Debug("Synced new blob sidecars")
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
|
||||
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
|
||||
@@ -178,3 +179,10 @@ func WithSyncComplete(c chan struct{}) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithAvailabilityStore(vs das.AvailabilityStore) Option {
|
||||
return func(s *Service) error {
|
||||
s.avs = vs
|
||||
return nil
|
||||
}
|
||||
}
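A hypothetical wiring sketch for the new option, built only from identifiers that appear elsewhere in this diff (das.NewCachingDBVerifiedStore, WithAvailabilityStore, NewService); the wrapper function and the database interface chosen here are assumptions.

// Assumed illustration: inject a caching, DB-backed availability store into the service.
func newChainServiceWithDAS(ctx context.Context, beaconDB db.HeadAccessDatabase) (*Service, error) {
	avs := das.NewCachingDBVerifiedStore(beaconDB) // constructor signature assumed from its use in the tests below
	return NewService(ctx, WithAvailabilityStore(avs))
}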
@@ -13,10 +13,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -265,7 +265,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := s.databaseDACheck(ctx, b); err != nil {
|
||||
if err := s.avs.VerifyAvailability(ctx, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrap(err, "could not validate blob data availability")
|
||||
}
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
|
||||
@@ -333,33 +333,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func commitmentsToCheck(b consensusblocks.ROBlock, current primitives.Slot) [][]byte {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return kzgCommitments
|
||||
}
|
||||
|
||||
func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock) error {
|
||||
commitments := commitmentsToCheck(b, s.CurrentSlot())
|
||||
if len(commitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, b.Root())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
return kzg.IsDataAvailable(commitments, sidecars)
|
||||
}
|
||||
|
||||
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
|
||||
e := coreTime.CurrentEpoch(st)
|
||||
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
|
||||
@@ -533,6 +506,8 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
t := time.Now()
|
||||
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
@@ -550,52 +525,52 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get KZG commitments")
|
||||
}
|
||||
existingBlobs := len(kzgCommitments)
|
||||
if existingBlobs == 0 {
|
||||
expected := len(kzgCommitments)
|
||||
if expected == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read first from db in case we have the blobs
|
||||
s.blobNotifier.Lock()
|
||||
var nc *blobNotifierChan
|
||||
var ok bool
|
||||
nc, ok = s.blobNotifier.chanForRoot[root]
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err == nil {
|
||||
if len(sidecars) >= existingBlobs {
|
||||
delete(s.blobNotifier.chanForRoot, root)
|
||||
s.blobNotifier.Unlock()
|
||||
return kzg.IsDataAvailable(kzgCommitments, sidecars)
|
||||
switch {
|
||||
case err == nil:
|
||||
if len(sidecars) >= expected {
|
||||
s.blobNotifiers.delete(root)
|
||||
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
|
||||
return err
|
||||
}
|
||||
logBlobSidecar(sidecars, t)
|
||||
return nil
|
||||
}
|
||||
case errors.Is(err, db.ErrNotFound):
|
||||
// If the blob sidecars haven't arrived yet, the subsequent code will wait for them.
|
||||
// Note: The system will not exit with an error in this scenario.
|
||||
default:
|
||||
log.WithError(err).Error("could not get blob sidecars from DB")
|
||||
}
|
||||
// Create the channel if it didn't exist already the index map will be
|
||||
// created later anyway
|
||||
if !ok {
|
||||
nc = &blobNotifierChan{channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
|
||||
s.blobNotifier.chanForRoot[root] = nc
|
||||
|
||||
found := map[uint64]struct{}{}
|
||||
for _, sc := range sidecars {
|
||||
found[sc.Index] = struct{}{}
|
||||
}
|
||||
// We have more commitments in the block than blobs in database
|
||||
// We sync the channel indices with the sidecars
|
||||
nc.indices = make(map[uint64]struct{})
|
||||
for _, sidecar := range sidecars {
|
||||
nc.indices[sidecar.Index] = struct{}{}
|
||||
}
|
||||
s.blobNotifier.Unlock()
|
||||
channelWrites := len(sidecars)
|
||||
nc := s.blobNotifiers.forRoot(root)
|
||||
for {
|
||||
select {
|
||||
case <-nc.channel:
|
||||
channelWrites++
|
||||
if channelWrites == existingBlobs {
|
||||
s.blobNotifier.Lock()
|
||||
delete(s.blobNotifier.chanForRoot, root)
|
||||
s.blobNotifier.Unlock()
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
return kzg.IsDataAvailable(kzgCommitments, sidecars)
|
||||
case idx := <-nc:
|
||||
found[idx] = struct{}{}
|
||||
if len(found) != expected {
|
||||
continue
|
||||
}
|
||||
s.blobNotifiers.delete(root)
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
|
||||
return err
|
||||
}
|
||||
logBlobSidecar(sidecars, t)
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
|
||||
}
|
||||
@@ -647,11 +622,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
return
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||
headState: headState,
|
||||
headRoot: headRoot,
|
||||
headBlock: headBlock.Block(),
|
||||
})
|
||||
s.cfg.ForkChoiceStore.RUnlock()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
|
||||
@@ -1,22 +1,23 @@
|
||||
package blockchain
|
||||
|
||||
import fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
|
||||
// for the block root `root` is ready in the database
|
||||
func (s *Service) SendNewBlobEvent(root [32]byte, index uint64) {
|
||||
s.blobNotifier.Lock()
|
||||
nc, ok := s.blobNotifier.chanForRoot[root]
|
||||
if !ok {
|
||||
nc = &blobNotifierChan{indices: make(map[uint64]struct{}), channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
|
||||
s.blobNotifier.chanForRoot[root] = nc
|
||||
}
|
||||
_, ok = nc.indices[index]
|
||||
if ok {
|
||||
s.blobNotifier.Unlock()
|
||||
return
|
||||
}
|
||||
nc.indices[index] = struct{}{}
|
||||
s.blobNotifier.Unlock()
|
||||
nc.channel <- struct{}{}
|
||||
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
|
||||
s.blobNotifiers.forRoot(root) <- index
|
||||
}
|
||||
|
||||
// ReceiveBlob saves the blob to the database and sends the new event
|
||||
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.BlobSidecar) error {
|
||||
if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{b}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package blockchain
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -36,12 +36,13 @@ type BlockReceiver interface {
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
BlockBeingSynced([32]byte) bool
|
||||
}
|
||||
|
||||
// BlobReceiver interface defines the methods of chain service for receiving new
|
||||
// blobs
|
||||
type BlobReceiver interface {
|
||||
SendNewBlobEvent([32]byte, uint64)
|
||||
ReceiveBlob(context.Context, *ethpb.BlobSidecar) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
@@ -58,6 +59,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
receivedTime := time.Now()
|
||||
s.blockBeingSynced.set(blockRoot)
|
||||
defer s.blockBeingSynced.unset(blockRoot)
|
||||
|
||||
blockCopy, err := block.Copy()
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -23,9 +23,10 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
copiedGen := genesis.Copy()
|
||||
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
|
||||
blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
blk, err := util.GenerateFullBlock(copiedGen.Copy(), keys, conf, slot)
|
||||
require.NoError(t, err)
|
||||
return blk
|
||||
}
|
||||
//params.SetupTestConfigCleanupWithLock(t)
|
||||
@@ -108,6 +109,9 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
// Hacky sleep, should use a better way to be able to resolve the race
|
||||
// between event being sent out and processed.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
@@ -119,6 +123,10 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
wg.Add(1)
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
}()
|
||||
genesis = genesis.Copy()
|
||||
s, tr := minimalTestService(t,
|
||||
WithFinalizedStateAtStartUp(genesis),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
@@ -139,10 +147,9 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
tt.check(t, s)
|
||||
}
|
||||
wg.Done()
|
||||
})
|
||||
}
|
||||
wg.Wait()
|
||||
@@ -173,6 +180,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
@@ -215,6 +223,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
|
||||
@@ -32,6 +33,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -60,7 +62,9 @@ type Service struct {
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
blobNotifier *blobNotifier
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
avs das.AvailabilityStore
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -89,14 +93,26 @@ type config struct {
|
||||
|
||||
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
|
||||
|
||||
type blobNotifierChan struct {
|
||||
indices map[uint64]struct{}
|
||||
channel chan struct{}
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
}
|
||||
|
||||
type blobNotifier struct {
|
||||
sync.RWMutex
|
||||
chanForRoot map[[32]byte]*blobNotifierChan
|
||||
func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (bn *blobNotifierMap) delete(root [32]byte) {
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
delete(bn.notifiers, root)
|
||||
}
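A short sketch of the consumer side of this map; it mirrors the waiting loop in isDataAvailable shown earlier in the diff and is illustrative only, not part of the changeset.

// Illustrative consumer: block until every expected blob index for a root has been
// announced. The producer side is sendNewBlobEvent, which never blocks because the
// channel is buffered to MaxBlobsPerBlock.
func waitForBlobIndices(ctx context.Context, bn *blobNotifierMap, root [32]byte, expected int) error {
	seen := make(map[uint64]struct{}, expected)
	ch := bn.forRoot(root)
	for len(seen) < expected {
		select {
		case idx := <-ch:
			seen[idx] = struct{}{}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	bn.delete(root)
	return nil
}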
// NewService instantiates a new block service instance that will
|
||||
@@ -110,8 +126,8 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
}
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifier{
|
||||
chanForRoot: make(map[[32]byte]*blobNotifierChan),
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
@@ -119,8 +135,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
blobNotifier: bn,
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
|
||||
blockBeingSynced: ¤tlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(srv); err != nil {
|
||||
@@ -367,7 +384,7 @@ func (s *Service) startFromExecutionChain() error {
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state notifier failed")
|
||||
log.WithError(err).Error("Subscription to state forRoot failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
|
||||
@@ -78,6 +79,7 @@ type testServiceRequirements struct {
|
||||
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
avs := das.NewCachingDBVerifiedStore(beaconDB)
|
||||
fcs := doublylinkedtree.New()
|
||||
sg := stategen.New(beaconDB, fcs)
|
||||
notif := &mockBeaconNode{}
|
||||
@@ -110,6 +112,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithAttestationService(req.attSrv),
|
||||
WithBLSToExecPool(req.blsPool),
|
||||
WithDepositCache(dc),
|
||||
WithAvailabilityStore(avs),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -71,6 +71,7 @@ type ChainService struct {
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
@@ -605,5 +606,12 @@ func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// SendNewBlobEvent mocks the same method in the chain service
|
||||
func (s *ChainService) SendNewBlobEvent(_ [32]byte, _ uint64) {}
|
||||
// BlockBeingSynced mocks the same method in the chain service
|
||||
func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
|
||||
return root == c.SyncingRoot
|
||||
}
|
||||
|
||||
// ReceiveBlob implements the same method in the chain service
|
||||
func (*ChainService) ReceiveBlob(_ context.Context, _ *ethpb.BlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
|
||||
valIdx: 2,
|
||||
st: genState(1),
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 32eth",
|
||||
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
|
||||
valIdx: 2,
|
||||
activeBalance: 1,
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 1",
|
||||
|
||||
@@ -42,7 +42,6 @@ go_library(
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/slashings:go_default_library",
|
||||
@@ -100,7 +99,6 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
|
||||
|
||||
@@ -51,6 +51,10 @@ func ProcessVoluntaryExits(
|
||||
beaconState state.BeaconState,
|
||||
exits []*ethpb.SignedVoluntaryExit,
|
||||
) (state.BeaconState, error) {
|
||||
// Avoid calculating the epoch churn if no exits exist.
|
||||
if len(exits) == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
|
||||
var exitEpoch primitives.Epoch
|
||||
for idx, exit := range exits {
|
||||
|
||||
@@ -82,7 +82,7 @@ func ProcessRandaoNoVerify(
|
||||
for i, x := range blockRandaoReveal {
|
||||
latestMixSlice[i] ^= x
|
||||
}
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), latestMixSlice); err != nil {
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), [32]byte(latestMixSlice)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
@@ -251,7 +250,7 @@ func BLSChangesSignatureBatch(
|
||||
// is from a previous fork.
|
||||
func VerifyBLSChangeSignature(
|
||||
st state.ReadOnlyBeaconState,
|
||||
change *ethpbv2.SignedBLSToExecutionChange,
|
||||
change *ethpb.SignedBLSToExecutionChange,
|
||||
) error {
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -152,7 +151,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
})
|
||||
|
||||
t.Run("signature does not verify", func(t *testing.T) {
|
||||
@@ -1209,8 +1208,7 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
@@ -1274,8 +1272,7 @@ func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
require.Equal(t, false, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
@@ -1362,7 +1359,6 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
params.OverrideBeaconConfig(savedConfig)
|
||||
}
|
||||
|
||||
@@ -137,9 +137,10 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
return nil, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
|
||||
churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
churnLimit := helpers.ValidatorActivationChurnLimit(activeValidatorCount)
|
||||
|
||||
if state.Version() >= version.Deneb {
|
||||
churnLimit = helpers.ValidatorActivationChurnLimitDeneb(activeValidatorCount)
|
||||
}
|
||||
|
||||
// Prevent churn limit cause index out of bound.
|
||||
@@ -348,7 +349,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), mix); err != nil {
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), [32]byte(mix)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -311,8 +311,7 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
limit, err := helpers.ValidatorChurnLimit(0)
|
||||
require.NoError(t, err)
|
||||
limit := helpers.ValidatorActivationChurnLimit(0)
|
||||
for i := uint64(0); i < limit+10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -338,6 +337,42 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
base := ðpb.BeaconStateDeneb{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.MinPerEpochChurnLimit = 10
|
||||
cfg.ChurnLimitQuotient = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
})
|
||||
}
|
||||
beaconState, err := state_native.InitializeFromProtoDeneb(base)
|
||||
require.NoError(t, err)
|
||||
currentEpoch := time.CurrentEpoch(beaconState)
|
||||
newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
for i, validator := range newState.Validators() {
|
||||
assert.Equal(t, currentEpoch+1, validator.ActivationEligibilityEpoch, "Could not update registry %d, unexpected activation eligibility epoch", i)
|
||||
// Note: In Deneb, only validator indices below `MaxPerEpochActivationChurnLimit` should be activated.
|
||||
if uint64(i) < params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
|
||||
t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
|
||||
i, helpers.ActivationExitEpoch(currentEpoch), validator.ActivationEpoch)
|
||||
}
|
||||
if uint64(i) >= params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
t.Errorf("Could not update registry %d, validators should not have been activated, wanted activation epoch: %d, got %d",
|
||||
i, params.BeaconConfig().FarFutureEpoch, validator.ActivationEpoch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_ActivationCompletes(t *testing.T) {
|
||||
base := ðpb.BeaconState{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
|
||||
@@ -22,6 +22,9 @@ const (
|
||||
|
||||
// BLSToExecutionChangeReceived is sent after a BLS to execution change object has been received from gossip or rpc.
|
||||
BLSToExecutionChangeReceived
|
||||
|
||||
// BlobSidecarReceived is sent after a blob sidecar is received from gossip or rpc.
|
||||
BlobSidecarReceived = 6
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -52,3 +55,8 @@ type SyncCommitteeContributionReceivedData struct {
|
||||
type BLSToExecutionChangeReceivedData struct {
|
||||
Change *ethpb.SignedBLSToExecutionChange
|
||||
}
|
||||
|
||||
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
|
||||
type BlobSidecarReceivedData struct {
|
||||
Blob *ethpb.SignedBlobSidecar
|
||||
}
|
||||
|
||||
@@ -99,7 +99,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "index 12390192 out of range", err)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "index 120391029 out of range", err)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "index 129301923 out of range", err)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
@@ -367,7 +367,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "index 21093019 out of range", err)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
|
||||
@@ -206,10 +206,7 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
return epoch + 1 + params.BeaconConfig().MaxSeedLookahead
|
||||
}
|
||||
|
||||
// ValidatorChurnLimit returns the number of validators that are allowed to
|
||||
// enter and exit validator pool for an epoch.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// calculateChurnLimit returns the validator churn limit, based on the formula in the spec.
|
||||
//
|
||||
// def get_validator_churn_limit(state: BeaconState) -> uint64:
|
||||
// """
|
||||
@@ -217,12 +214,32 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
// """
|
||||
// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||
// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
|
||||
func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
|
||||
func calculateChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
churnLimit := activeValidatorCount / params.BeaconConfig().ChurnLimitQuotient
|
||||
if churnLimit < params.BeaconConfig().MinPerEpochChurnLimit {
|
||||
churnLimit = params.BeaconConfig().MinPerEpochChurnLimit
|
||||
return params.BeaconConfig().MinPerEpochChurnLimit
|
||||
}
|
||||
return churnLimit, nil
|
||||
return churnLimit
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimit returns the maximum number of validators that can be activated in an epoch.
|
||||
func ValidatorActivationChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorExitChurnLimit returns the maximum number of validators that can exit in an epoch.
|
||||
func ValidatorExitChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimitDeneb returns the maximum number of validators that can be activated in an epoch after Deneb.
|
||||
func ValidatorActivationChurnLimitDeneb(activeValidatorCount uint64) uint64 {
|
||||
limit := calculateChurnLimit(activeValidatorCount)
|
||||
// New in Deneb.
|
||||
if limit > params.BeaconConfig().MaxPerEpochActivationChurnLimit {
|
||||
return params.BeaconConfig().MaxPerEpochActivationChurnLimit
|
||||
}
|
||||
return limit
|
||||
}
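A small standalone check of the new churn helpers using mainnet-style parameters as assumptions (min churn 4, churn-limit quotient 65536, Deneb activation cap 8); it is independent of the code above and only illustrates the arithmetic.

package main

import "fmt"

// churn mirrors calculateChurnLimit: max(minLimit, active/quotient).
func churn(active, minLimit, quotient uint64) uint64 {
	if c := active / quotient; c > minLimit {
		return c
	}
	return minLimit
}

func main() {
	const (
		minChurn    uint64 = 4
		quotient    uint64 = 65536
		denebActCap uint64 = 8 // MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT (assumed mainnet value)
	)
	for _, active := range []uint64{100_000, 1_000_000} {
		c := churn(active, minChurn, quotient)
		act := c
		if act > denebActCap {
			act = denebActCap // new in Deneb: activations are additionally capped
		}
		fmt.Printf("active=%d exit=%d activation(Deneb)=%d\n", active, c, act)
	}
}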
// BeaconProposerIndex returns proposer index of a current slot.
|
||||
|
||||
@@ -367,9 +367,50 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
resultChurn, err := ValidatorChurnLimit(validatorCount)
|
||||
resultChurn := ValidatorActivationChurnLimit(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
tests := []struct {
|
||||
validatorCount int
|
||||
wantedChurn uint64
|
||||
}{
|
||||
{1000, 4},
|
||||
{100000, 4},
|
||||
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
// Create validators
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize beacon state
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorChurnLimit(%d)", test.validatorCount)
|
||||
|
||||
// Get active validator count
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test churn limit calculation
|
||||
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -80,10 +80,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
|
||||
T := cfg.MaxEffectiveBalance / cfg.GweiPerEth
|
||||
|
||||
// Validator churn limit.
|
||||
delta, err := ValidatorChurnLimit(N)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot obtain active validator churn limit: %w", err)
|
||||
}
|
||||
delta := ValidatorExitChurnLimit(N)
|
||||
|
||||
// Balance top-ups.
|
||||
Delta := uint64(cfg.SlotsPerEpoch.Mul(cfg.MaxDeposits))
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -221,6 +222,18 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
func EmptyGenesisState() (state.BeaconState, error) {
|
||||
blockRoots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, fieldparams.StateRootsLength)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
mixes := make([][]byte, fieldparams.RandaoMixesLength)
|
||||
for i := range mixes {
|
||||
mixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
st := ðpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
@@ -229,6 +242,9 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: mixes,
|
||||
// Validator registry fields.
|
||||
Validators: []*ethpb.Validator{},
|
||||
Balances: []uint64{},
|
||||
|
||||
@@ -82,10 +82,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
|
||||
if churn >= currentChurn {
|
||||
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
|
||||
@@ -235,10 +232,7 @@ func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validato
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
@@ -276,10 +270,7 @@ func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
|
||||
beacon-chain/das/BUILD.bazel (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["availability.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cache/nonblocking:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["availability_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/das/availability.go (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v4/cache/nonblocking"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
errCachedCommitmentMismatch = errors.New("previously verified commitments do not match those in block")
|
||||
// concurrentBlockFetchers * blocks per request is used to size the LRU so that we can have one cache
|
||||
// for many worker goroutines without them evicting each others results.
|
||||
// TODO: figure out how to determine the max number of init sync workers.
|
||||
concurrentBlockFetchers = 10
|
||||
)
|
||||
|
||||
// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously
|
||||
// verified and saved sidecars.
|
||||
type AvailabilityStore interface {
|
||||
VerifyAvailability(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
SaveIfAvailable(ctx context.Context, current primitives.Slot, b blocks.BlockWithVerifiedBlobs) error
|
||||
}
|
||||
|
||||
type CachingDBVerifiedStore struct {
|
||||
db BlobsDB
|
||||
cache *nonblocking.LRU[[32]byte, [][]byte]
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &CachingDBVerifiedStore{}
|
||||
|
||||
func NewCachingDBVerifiedStore(db BlobsDB) *CachingDBVerifiedStore {
|
||||
discardev := func([32]byte, [][]byte) {}
|
||||
size := int(params.BeaconNetworkConfig().MaxRequestBlocks) * concurrentBlockFetchers
|
||||
cache, err := nonblocking.NewLRU[[32]byte, [][]byte](size, discardev)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &CachingDBVerifiedStore{
|
||||
db: db,
|
||||
cache: cache,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *CachingDBVerifiedStore) VerifyAvailability(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
c := commitmentsToCheck(b, current)
|
||||
if len(c) == 0 {
|
||||
return nil
|
||||
}
|
||||
// get the previously verified commitments
|
||||
vc, ok := s.cache.Get(b.Root())
|
||||
if !ok {
|
||||
return s.databaseDACheck(ctx, current, b.Root(), c)
|
||||
}
|
||||
// check commitments to make sure that they match
|
||||
if len(c) != len(vc) {
|
||||
return errors.Wrapf(errCachedCommitmentMismatch, "cache=%d, block=%d", len(vc), len(c))
|
||||
}
|
||||
for i := range vc {
|
||||
// check each commitment in the block against value previously validated and saved.
|
||||
if !bytes.Equal(vc[i], c[i]) {
|
||||
return errors.Wrapf(errCachedCommitmentMismatch, "commitment %#x at index %d does not match cache %#x", c[i], i, vc[i])
|
||||
}
|
||||
}
|
||||
// all commitments match
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CachingDBVerifiedStore) SaveIfAvailable(ctx context.Context, current primitives.Slot, bwb blocks.BlockWithVerifiedBlobs) error {
|
||||
b := bwb.Block
|
||||
c := commitmentsToCheck(b, current)
|
||||
if len(c) == 0 {
|
||||
return nil
|
||||
}
|
||||
if err := kzg.IsDataAvailable(c, bwb.Blobs); err != nil {
|
||||
return errors.Wrapf(err, "kzg.IsDataAvailable check failed for %#x", b.Root())
|
||||
}
|
||||
if err := s.db.SaveBlobSidecar(ctx, bwb.Blobs); err != nil {
|
||||
return errors.Wrapf(err, "error while trying to save verified blob sidecars for root %#x", b.Root())
|
||||
}
|
||||
// cache the commitments that matched so that we can confirm them cheaply while they remain in cache.
|
||||
s.cache.Add(b.Root(), c)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CachingDBVerifiedStore) databaseDACheck(ctx context.Context, current primitives.Slot, root [32]byte, cmts [][]byte) error {
|
||||
log.WithField("root", fmt.Sprintf("%#x", root)).Warn("Falling back to database DA check")
|
||||
sidecars, err := s.db.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
if err := kzg.IsDataAvailable(cmts, sidecars); err != nil {
|
||||
return err
|
||||
}
|
||||
s.cache.Add(root, cmts)
|
||||
return nil
|
||||
}
|
||||
|
||||
type BlobsDB interface {
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
}
|
||||
|
||||
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) [][]byte {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return kzgCommitments
|
||||
}
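A minimal sketch of how a consumer such as an init-sync worker might drive this interface; processBlock and its wiring are hypothetical, only the AvailabilityStore methods and types come from the file above.

package das

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// processBlock (hypothetical) shows the intended call order: availability is
// verified, and the sidecars persisted, before the block itself is imported.
func processBlock(ctx context.Context, avs AvailabilityStore, current primitives.Slot, bwb blocks.BlockWithVerifiedBlobs, importBlock func(blocks.ROBlock) error) error {
	// Blocks fetched together with their blobs: check the KZG proofs and save the sidecars.
	if err := avs.SaveIfAvailable(ctx, current, bwb); err != nil {
		return err
	}
	// A later stage can cheaply re-confirm availability for the same root via the cache.
	if err := avs.VerifyAvailability(ctx, current, bwb.Block); err != nil {
		return err
	}
	return importBlock(bwb.Block)
}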
|
||||
beacon-chain/das/availability_test.go (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func Test_commitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "pre deneb",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
bb := util.NewBeaconBlockBellatrix()
|
||||
sb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "commitments within da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockDeneb()
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
d.Block.Slot = 100
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
commits: commits,
|
||||
slot: 100,
|
||||
},
|
||||
{
|
||||
name: "commitments outside da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockDeneb()
|
||||
// block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
b := c.block(t)
|
||||
co := commitmentsToCheck(b, c.slot)
|
||||
require.Equal(t, len(c.commits), len(co))
|
||||
for i := 0; i < len(c.commits); i++ {
|
||||
require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
"error.go",
|
||||
"execution_chain.go",
|
||||
"finalized_block_roots.go",
|
||||
"flags.go",
|
||||
"genesis.go",
|
||||
"key.go",
|
||||
"kv.go",
|
||||
@@ -38,6 +39,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -65,6 +67,7 @@ go_library(
|
||||
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
|
||||
"@com_github_schollz_progressbar_v3//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
@@ -83,6 +86,7 @@ go_test(
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
"finalized_block_roots_test.go",
|
||||
"flags_test.go",
|
||||
"genesis_test.go",
|
||||
"init_test.go",
|
||||
"kv_test.go",
|
||||
@@ -103,6 +107,7 @@ go_test(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -119,6 +124,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -12,11 +12,20 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
errBlobSlotMismatch = errors.New("sidecar slot mismatch")
|
||||
errBlobParentMismatch = errors.New("sidecar parent root mismatch")
|
||||
errBlobRootMismatch = errors.New("sidecar root mismatch")
|
||||
errBlobProposerMismatch = errors.New("sidecar proposer index mismatch")
|
||||
errBlobSidecarLimit = errors.New("sidecar exceeds maximum number of blobs")
|
||||
errEmptySidecar = errors.New("nil or empty blob sidecars")
|
||||
errNewerBlobExists = errors.New("Will not overwrite newer blobs in db")
|
||||
)
|
||||
|
||||
// A blob rotating key is represented as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
type blobRotatingKey []byte
|
||||
|
||||
@@ -45,94 +54,119 @@ func (rk blobRotatingKey) BlockRoot() []byte {
|
||||
//
|
||||
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
|
||||
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
|
||||
// Trying to replace a newer blob with an older one is an error.
|
||||
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
|
||||
if len(scs) == 0 {
|
||||
return errEmptySidecar
|
||||
}
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
|
||||
defer span.End()
|
||||
|
||||
|
||||
first := scs[0]
|
||||
newKey := blobSidecarKey(first)
|
||||
prefix := newKey.BufferPrefix()
|
||||
var prune []blobRotatingKey
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
var existing []byte
|
||||
sc := &ethpb.BlobSidecars{}
|
||||
bkt := tx.Bucket(blobsBucket)
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
|
||||
key := blobRotatingKey(k)
|
||||
ks := key.Slot()
|
||||
if ks < first.Slot {
|
||||
// Mark older blobs at the same position of the ring buffer for deletion.
|
||||
prune = append(prune, key)
|
||||
continue
|
||||
}
|
||||
if ks > first.Slot {
|
||||
// We shouldn't be overwriting newer blobs with older blobs. Something is wrong.
|
||||
return errNewerBlobExists
|
||||
}
|
||||
// The slot isn't older or newer, so it must be equal.
|
||||
// If the roots match, then we want to merge the new sidecars with the existing data.
|
||||
if bytes.Equal(first.BlockRoot, key.BlockRoot()) {
|
||||
existing = v
|
||||
if err := decode(ctx, v, sc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// If the slot is equal but the roots don't match, leave the existing key alone and allow the sidecar
|
||||
// to be written to the new key with the same prefix. In this case sc will be empty, so it will just
|
||||
// contain the incoming sidecars when we write it.
|
||||
}
|
||||
sc.Sidecars = append(sc.Sidecars, scs...)
|
||||
sortSidecars(sc.Sidecars)
|
||||
var err error
|
||||
sc.Sidecars, err = validUniqueSidecars(sc.Sidecars)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
encoded, err := encode(ctx, sc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// don't write if the merged result is the same as before
|
||||
if len(existing) == len(encoded) && bytes.Equal(existing, encoded) {
|
||||
return nil
|
||||
}
|
||||
// Only prune if we're actually going through with the update.
|
||||
for _, k := range prune {
|
||||
if err := bkt.Delete(k); err != nil {
|
||||
// note: attempting to delete a key that does not exist should not return an error.
|
||||
log.WithError(err).Warnf("Could not delete blob key %#x.", k)
|
||||
}
|
||||
}
|
||||
return bkt.Put(newKey, encoded)
|
||||
})
|
||||
}
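The cursor loop above boils down to one comparison per existing key found under the same rotating-buffer prefix; a standalone sketch of just that decision (blobWriteAction is a hypothetical helper, not part of the change):

package sketch

// blobWriteAction distills the per-key decision made in SaveBlobSidecar's cursor loop.
func blobWriteAction(existingSlot, incomingSlot uint64, sameRoot bool) string {
	switch {
	case existingSlot < incomingSlot:
		return "prune" // older data at this ring-buffer position is marked for deletion
	case existingSlot > incomingSlot:
		return "error" // newer blobs are never overwritten by older ones
	case sameRoot:
		return "merge" // same slot and root: merge, sort and de-duplicate the sidecars
	default:
		return "write new key" // same slot, different root (equivocation): both are kept
	}
}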
|
||||
|
||||
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and no more than MAX_BLOB_EPOCHS.
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
if len(scs) == 0 {
return nil, errEmptySidecar
}
// If there's only 1 sidecar, we've got nothing to compare.
if len(scs) == 1 {
return scs, nil
}
|
||||
|
||||
prev := scs[0]
|
||||
didx := 1
|
||||
for i := 1; i < len(scs); i++ {
|
||||
sc := scs[i]
|
||||
if sc.Slot != prev.Slot {
|
||||
return nil, errors.Wrapf(errBlobSlotMismatch, "%d != %d", sc.Slot, prev.Slot)
|
||||
}
|
||||
if !bytes.Equal(sc.BlockParentRoot, prev.BlockParentRoot) {
|
||||
return nil, errors.Wrapf(errBlobParentMismatch, "%x != %x", sc.BlockParentRoot, prev.BlockParentRoot)
|
||||
}
|
||||
if !bytes.Equal(sc.BlockRoot, prev.BlockRoot) {
|
||||
return nil, errors.Wrapf(errBlobRootMismatch, "%x != %x", sc.BlockRoot, prev.BlockRoot)
|
||||
}
|
||||
if sc.ProposerIndex != prev.ProposerIndex {
|
||||
return nil, errors.Wrapf(errBlobProposerMismatch, "%d != %d", sc.ProposerIndex, prev.ProposerIndex)
|
||||
}
|
||||
// skip duplicate
|
||||
if sc.Index == prev.Index {
|
||||
continue
|
||||
}
|
||||
if didx != i {
|
||||
scs[didx] = scs[i]
|
||||
}
|
||||
prev = scs[i]
|
||||
didx += 1
|
||||
}
|
||||
|
||||
if didx > fieldparams.MaxBlobsPerBlock {
|
||||
return nil, errors.Wrapf(errBlobSidecarLimit, "%d > %d", didx, fieldparams.MaxBlobsPerBlock)
|
||||
}
|
||||
return scs[0:didx], nil
|
||||
}
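The de-duplication above is the usual compact-in-place pattern over a sorted slice, tracking a separate write index; the same idea on plain integers (a generic sketch, not tied to sidecars):

package sketch

// dedupSorted keeps the first occurrence of each value in an already sorted
// slice, compacting in place like the didx loop in validUniqueSidecars.
func dedupSorted(xs []int) []int {
	if len(xs) < 2 {
		return xs
	}
	w := 1
	for i := 1; i < len(xs); i++ {
		if xs[i] == xs[i-1] {
			continue // skip duplicate of the previous element
		}
		xs[w] = xs[i]
		w++
	}
	return xs[:w]
}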
|
||||
|
||||
// sortSidecars sorts the sidecars by their index.
|
||||
func sortSidecars(scs []*ethpb.BlobSidecar) {
|
||||
sort.Slice(scs, func(i, j int) bool {
|
||||
return scs[i].Index < scs[j].Index
|
||||
})
|
||||
@@ -191,7 +225,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
|
||||
}
|
||||
|
||||
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
|
||||
// If the `indices` argument is omitted, all blobs for the slot will be returned.
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
@@ -255,7 +289,6 @@ func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {
|
||||
|
||||
func slotKey(slot types.Slot) []byte {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
|
||||
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
|
||||
}
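A small worked example of the ring-buffer arithmetic in slotKey, assuming the mainnet values SlotsPerEpoch = 32 and MinEpochsForBlobsSidecarsRequest = 4096 (which give the 131072-entry bound mentioned above):

package main

import "fmt"

func main() {
	const (
		slotsPerEpoch           = 32   // assumed mainnet value
		minEpochsForBlobRequest = 4096 // assumed mainnet value
	)
	maxSlotsToPersistBlobs := uint64(slotsPerEpoch * minEpochsForBlobRequest) // 131072 positions
	for _, slot := range []uint64{0, 1, 131071, 131072, 131073} {
		// Slots that map to the same position share a rotating key prefix and compete for it.
		fmt.Printf("slot %d -> buffer position %d\n", slot, slot%maxSlotsToPersistBlobs)
	}
}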
|
||||
@@ -265,14 +298,14 @@ func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
|
||||
b := tx.Bucket(chainMetadataBucket)
|
||||
v := b.Get(blobRetentionEpochsKey)
|
||||
if v == nil {
|
||||
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(maxEpochsToPersistBlobs))); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
e := bytesutil.BytesToUint64BigEndian(v)
|
||||
if e != uint64(maxEpochsToPersistBlobs) {
|
||||
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, maxEpochsToPersistBlobs)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
|
||||
@@ -3,10 +3,13 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -15,6 +18,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -294,6 +298,24 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
})
|
||||
t.Run("save equivocating blobs", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
|
||||
eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
|
||||
|
||||
for i, sc := range scs {
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{eScs[i]}))
|
||||
}
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
|
||||
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(eScs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(eScs, got))
|
||||
})
|
||||
}
|
||||
|
||||
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
@@ -314,7 +336,6 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
kzgProof := make([]byte, 48)
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &ethpb.BlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
Index: index,
|
||||
@@ -327,26 +348,56 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
}
|
||||
}
|
||||
|
||||
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
blobSidecars[i] = generateEquivocatingBlobSidecar(t, i)
|
||||
}
|
||||
return blobSidecars
|
||||
}
|
||||
|
||||
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
blob := make([]byte, 131072)
|
||||
_, err := rand.Read(blob)
|
||||
require.NoError(t, err)
|
||||
kzgCommitment := make([]byte, 48)
|
||||
_, err = rand.Read(kzgCommitment)
|
||||
require.NoError(t, err)
|
||||
kzgProof := make([]byte, 48)
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &ethpb.BlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'c'}, 32),
|
||||
Index: index,
|
||||
Slot: 100,
|
||||
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
|
||||
ProposerIndex: 102,
|
||||
Blob: blob,
|
||||
KzgCommitment: kzgCommitment,
|
||||
KzgProof: kzgProof,
|
||||
}
|
||||
}
|
||||
|
||||
func Test_validUniqueSidecars_validation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
err error
|
||||
}{
|
||||
{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
|
||||
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), error: "too many sidecars: 7 > 6"},
|
||||
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
|
||||
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
|
||||
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
|
||||
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
|
||||
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
|
||||
{name: "empty", scs: []*ethpb.BlobSidecar{}, err: errEmptySidecar},
|
||||
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit},
|
||||
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
|
||||
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
|
||||
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
|
||||
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
|
||||
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := validUniqueSidecars(tt.scs)
|
||||
if tt.err != nil {
|
||||
require.ErrorIs(t, err, tt.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -354,6 +405,62 @@ func TestStore_verifySideCars(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_validUniqueSidecars_dedup(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
expected []*ethpb.BlobSidecar
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "duplicate sidecar",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
},
|
||||
{
|
||||
name: "single sidecar",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}},
|
||||
},
|
||||
{
|
||||
name: "multiple duplicates",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
},
|
||||
{
|
||||
name: "ok number after de-dupe, > 6 before",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
|
||||
},
|
||||
{
|
||||
name: "max unique, no dupes",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
|
||||
},
|
||||
{
|
||||
name: "too many unique",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
err: errBlobSidecarLimit,
|
||||
},
|
||||
{
|
||||
name: "too many unique with dupes",
|
||||
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
|
||||
err: errBlobSidecarLimit,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
u, err := validUniqueSidecars(c.scs)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, len(c.expected), len(u))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_sortSidecars(t *testing.T) {
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
{Index: 6},
|
||||
@@ -364,7 +471,7 @@ func TestStore_sortSidecars(t *testing.T) {
|
||||
{Index: 5},
|
||||
{},
|
||||
}
|
||||
sortSidecars(scs)
|
||||
for i := 0; i < len(scs)-1; i++ {
|
||||
require.Equal(t, uint64(i), scs[i].Index)
|
||||
}
|
||||
@@ -412,9 +519,12 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(42069, 10)))
|
||||
cliCtx := cli.NewContext(&cli.App{}, set, nil)
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
|
||||
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
|
||||
}
|
||||
|
||||
|
||||
@@ -111,10 +111,10 @@ var blockTests = []struct {
|
||||
name: "deneb blind",
|
||||
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
b := util.NewBlindedBeaconBlockDeneb()
|
||||
b.Message.Slot = slot
|
||||
if root != nil {
|
||||
b.Message.ParentRoot = root
|
||||
b.Message.Body.BlobKzgCommitments = [][]byte{
|
||||
bytesutil.PadTo([]byte{0x05}, 48),
|
||||
bytesutil.PadTo([]byte{0x06}, 48),
|
||||
bytesutil.PadTo([]byte{0x07}, 48),
|
||||
|
||||
beacon-chain/db/kv/flags.go (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
|
||||
// ConfigureBlobRetentionEpoch sets the for blob retention based on command-line context. It sets the local config `maxEpochsToPersistBlobs`.
|
||||
// If the flag is not set, the spec default `MinEpochsForBlobsSidecarsRequest` is used.
|
||||
// An error if the input epoch is smaller than the spec default value.
|
||||
func ConfigureBlobRetentionEpoch(cliCtx *cli.Context) error {
|
||||
// Check if the blob retention epoch flag is set.
|
||||
if cliCtx.IsSet(flags.BlobRetentionEpoch.Name) {
|
||||
// Retrieve and cast the epoch value.
|
||||
epochValue := cliCtx.Uint64(flags.BlobRetentionEpoch.Name)
|
||||
e := primitives.Epoch(epochValue)
|
||||
|
||||
// Validate the epoch value against the spec default.
|
||||
if e < params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest {
|
||||
return fmt.Errorf("%s smaller than spec default, %d < %d", flags.BlobRetentionEpoch.Name, e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
}
|
||||
|
||||
maxEpochsToPersistBlobs = e
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
beacon-chain/db/kv/flags_test.go (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func TestConfigureBlobRetentionEpoch(t *testing.T) {
|
||||
maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
params.SetupTestConfigCleanup(t)
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
|
||||
// Test case: Spec default.
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cli.NewContext(&app, set, nil)))
|
||||
require.Equal(t, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest, maxEpochsToPersistBlobs)
|
||||
|
||||
set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
|
||||
minEpochsForSidecarRequest := uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(2*minEpochsForSidecarRequest, 10)))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
// Test case: Input epoch is greater than or equal to spec value.
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
|
||||
require.Equal(t, primitives.Epoch(2*minEpochsForSidecarRequest), maxEpochsToPersistBlobs)
|
||||
|
||||
// Test case: Input epoch is less than spec value.
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(minEpochsForSidecarRequest-1, 10)))
|
||||
cliCtx = cli.NewContext(&app, set, nil)
|
||||
err := ConfigureBlobRetentionEpoch(cliCtx)
|
||||
require.ErrorContains(t, "extend-blob-retention-epoch smaller than spec default", err)
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
@@ -45,6 +46,10 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
|
||||
|
||||
// LoadGenesis loads a genesis state from a ssz-serialized byte slice, if no genesis exists already.
|
||||
func (s *Store) LoadGenesis(ctx context.Context, sb []byte) error {
|
||||
if len(sb) < (1 << 10) {
|
||||
log.WithField("size", fmt.Sprintf("%d bytes", len(sb))).
|
||||
Warn("Genesis state is smaller than one 1Kb. This could be an empty file, git lfs metadata file, or corrupt genesis state.")
|
||||
}
|
||||
vu, err := detect.FromState(sb)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -48,6 +48,8 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
testAcc.Backend.Commit()
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
exitRoutine := make(chan bool)
|
||||
|
||||
go func() {
|
||||
@@ -58,8 +60,6 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
header, err := web3Service.HeaderByNumber(web3Service.ctx, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
tickerChan <- time.Now()
|
||||
web3Service.cancel()
|
||||
exitRoutine <- true
|
||||
|
||||
@@ -3,6 +3,7 @@ package execution
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -106,28 +107,26 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)
|
||||
|
||||
// Initializes an RPC connection with authentication headers.
|
||||
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
|
||||
headers := http.Header{}
|
||||
if endpoint.Auth.Method != authorization.None {
|
||||
header, err := endpoint.Auth.ToHeaderValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SetHeader("Authorization", header)
|
||||
headers.Set("Authorization", header)
|
||||
}
|
||||
for _, h := range s.cfg.headers {
|
||||
if h != "" {
|
||||
keyValue := strings.Split(h, "=")
|
||||
if len(keyValue) < 2 {
|
||||
log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
|
||||
continue
|
||||
}
|
||||
client.SetHeader(keyValue[0], strings.Join(keyValue[1:], "="))
|
||||
if h == "" {
|
||||
continue
|
||||
}
|
||||
keyValue := strings.Split(h, "=")
|
||||
if len(keyValue) < 2 {
|
||||
log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
|
||||
continue
|
||||
}
|
||||
headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
|
||||
}
|
||||
return network.NewExecutionRPCClient(ctx, endpoint, headers)
|
||||
}
|
||||
|
||||
// Checks the chain ID of the execution client to ensure
|
||||
|
||||
@@ -208,13 +208,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
}
|
||||
}
|
||||
|
||||
eth1Data, err := s.validPowchainData(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
return nil, errors.Wrap(err, "unable to validate powchain data")
|
||||
}
|
||||
if err := s.initializeEth1Data(ctx, eth1Data); err != nil {
|
||||
return nil, err
|
||||
@@ -822,23 +818,22 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
|
||||
|
||||
// Validates the current powchain data is saved and makes sure that any
|
||||
// embedded genesis state is correctly accounted for.
|
||||
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
|
||||
func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error) {
|
||||
genState, err := s.cfg.beaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
}
|
||||
if genState == nil || genState.IsNil() {
|
||||
return eth1Data, nil
|
||||
}
|
||||
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
|
||||
pbState, err := native.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.chainStartData = &ethpb.ChainStartData{
|
||||
Chainstarted: true,
|
||||
@@ -856,22 +851,24 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
|
||||
if features.Get().EnableEIP4881 {
|
||||
trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
|
||||
if !ok {
|
||||
return errors.New("deposit trie was not EIP4881 DepositTree")
|
||||
return nil, errors.New("deposit trie was not EIP4881 DepositTree")
|
||||
}
|
||||
eth1Data.DepositSnapshot, err = trie.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
trie, ok := s.depositTrie.(*trie.SparseMerkleTrie)
|
||||
if !ok {
|
||||
return errors.New("deposit trie was not SparseMerkleTrie")
|
||||
return nil, errors.New("deposit trie was not SparseMerkleTrie")
|
||||
}
|
||||
eth1Data.Trie = trie.ToProto()
|
||||
}
|
||||
if err := s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return eth1Data, nil
|
||||
}
|
||||
|
||||
func dedupEndpoints(endpoints []string) []string {
|
||||
|
||||
@@ -571,7 +571,8 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
@@ -601,7 +602,8 @@ func TestService_InitializeCorrectly(t *testing.T) {
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
@@ -636,7 +638,8 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
|
||||
DepositContainers: []*ethpb.DepositContainer{{Index: 1}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"forkchoice.go",
|
||||
"last_root.go",
|
||||
"metrics.go",
|
||||
"node.go",
|
||||
"on_tick.go",
|
||||
@@ -49,6 +50,7 @@ go_test(
|
||||
srcs = [
|
||||
"ffg_update_test.go",
|
||||
"forkchoice_test.go",
|
||||
"last_root_test.go",
|
||||
"no_vote_test.go",
|
||||
"node_test.go",
|
||||
"on_tick_test.go",
|
||||
|
||||
beacon-chain/forkchoice/doubly-linked-tree/last_root.go (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
// LastRoot returns the last canonical block root in the given epoch
|
||||
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
|
||||
head := f.store.headNode
|
||||
headEpoch := slots.ToEpoch(head.slot)
|
||||
epochEnd, err := slots.EpochEnd(epoch)
|
||||
if err != nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch <= epoch {
|
||||
return head.root
|
||||
}
|
||||
for head != nil && head.slot > epochEnd {
|
||||
head = head.parent
|
||||
}
|
||||
if head == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return head.root
|
||||
}
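A toy version of the walk LastRoot performs, using a minimal parent-linked node instead of the forkchoice store types (a sketch for intuition only):

package sketch

// toyNode carries only the fields the walk needs: slot, root and a parent link.
type toyNode struct {
	slot   uint64
	root   [32]byte
	parent *toyNode
}

// lastRootAtOrBefore walks from head towards genesis until it reaches the last
// node whose slot does not exceed end (the final slot of the requested epoch).
func lastRootAtOrBefore(head *toyNode, end uint64) [32]byte {
	for head != nil && head.slot > end {
		head = head.parent
	}
	if head == nil {
		return [32]byte{}
	}
	return head.root
}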
|
||||
beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestLastRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
headNode, _ := f.store.nodeByRoot[[32]byte{'6'}]
|
||||
f.store.headNode = headNode
|
||||
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
|
||||
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
|
||||
}
|
||||
@@ -49,6 +49,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot := primitives.Slot(1)
|
||||
driftGenesisTime(f, slot, 0)
|
||||
newRoot := indexToHash(1)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err := prepareForkchoiceState(
|
||||
ctx,
|
||||
slot,
|
||||
@@ -74,6 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = primitives.Slot(2)
|
||||
driftGenesisTime(f, slot, 0)
|
||||
newRoot = indexToHash(2)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err = prepareForkchoiceState(
|
||||
ctx,
|
||||
slot,
|
||||
@@ -101,6 +103,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = primitives.Slot(3)
|
||||
driftGenesisTime(f, slot, 0)
|
||||
newRoot = indexToHash(3)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err = prepareForkchoiceState(
|
||||
ctx,
|
||||
slot,
|
||||
@@ -129,6 +132,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = primitives.Slot(4)
|
||||
driftGenesisTime(f, slot, 0)
|
||||
newRoot = indexToHash(4)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err = prepareForkchoiceState(
|
||||
ctx,
|
||||
slot,
|
||||
@@ -335,6 +339,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
cSlot := primitives.Slot(2)
|
||||
driftGenesisTime(f, cSlot, 0)
|
||||
c := indexToHash(2)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err := prepareForkchoiceState(
|
||||
ctx,
|
||||
cSlot,
|
||||
@@ -354,6 +359,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
|
||||
bSlot := primitives.Slot(1)
|
||||
b := indexToHash(1)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err = prepareForkchoiceState(
|
||||
ctx,
|
||||
bSlot,
|
||||
@@ -378,6 +384,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// A block D, building on B, is received at slot N+3. It should not be able to win without boosting.
|
||||
dSlot := primitives.Slot(3)
|
||||
d := indexToHash(3)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err = prepareForkchoiceState(
|
||||
ctx,
|
||||
dSlot,
|
||||
@@ -398,6 +405,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// If the same block arrives with boosting then it becomes head:
|
||||
driftGenesisTime(f, dSlot, 0)
|
||||
d2 := indexToHash(30)
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
state, blkRoot, err = prepareForkchoiceState(
|
||||
ctx,
|
||||
dSlot,
|
||||
|
||||
@@ -109,7 +109,8 @@ func (s *Store) insert(ctx context.Context,
|
||||
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
|
||||
isFirstBlock := s.proposerBoostRoot == [32]byte{}
|
||||
if currentSlot == slot && secondsIntoSlot < boostThreshold && isFirstBlock {
|
||||
s.proposerBoostRoot = root
|
||||
}
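For intuition on the boost window checked above (a sketch assuming the mainnet values SecondsPerSlot = 12 and IntervalsPerSlot = 3; the added isFirstBlock condition additionally requires that no block has claimed the boost yet in this slot):

package main

import "fmt"

func main() {
	const (
		secondsPerSlot   = 12 // assumed mainnet value
		intervalsPerSlot = 3  // assumed mainnet value
	)
	const boostThreshold = secondsPerSlot / intervalsPerSlot // 4s: first third of the slot
	for _, secondsIntoSlot := range []uint64{0, 3, 4, 11} {
		fmt.Printf("%ds into slot -> timely for proposer boost: %t\n", secondsIntoSlot, secondsIntoSlot < boostThreshold)
	}
}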
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ type Getter interface {
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
ShouldOverrideFCU() bool
|
||||
Slot([32]byte) (primitives.Slot, error)
|
||||
LastRoot(primitives.Epoch) [32]byte
|
||||
}
|
||||
|
||||
// Setter allows to set forkchoice information
|
||||
|
||||
@@ -52,7 +52,6 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
|
||||
}
|
||||
if flags.EnableHTTPEthAPI(httpModules) {
|
||||
ethRegistrations := []gateway.PbHandlerRegistration{
|
||||
ethpbservice.RegisterBeaconNodeHandler,
|
||||
ethpbservice.RegisterBeaconChainHandler,
|
||||
ethpbservice.RegisterBeaconValidatorHandler,
|
||||
ethpbservice.RegisterEventsHandler,
|
||||
|
||||
@@ -15,7 +15,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
|
||||
assert.Equal(t, 3, len(cfg.EthPbMux.Registrations))
|
||||
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
|
||||
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
|
||||
@@ -29,7 +29,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
|
||||
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
|
||||
@@ -41,7 +41,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
assert.NotNil(t, cfg.EthPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
|
||||
})
|
||||
t.Run("Without Eth API", func(t *testing.T) {
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"node.go",
|
||||
"options.go",
|
||||
"prometheus.go",
|
||||
"router.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/node",
|
||||
visibility = [
|
||||
@@ -22,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/slasherkv:go_default_library",
|
||||
@@ -40,6 +42,7 @@ go_library(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/rpc:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/slasher:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/slasherkv"
|
||||
@@ -153,6 +154,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
if err := configureExecutionSetting(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := kv.ConfigureBlobRetentionEpoch(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configureFastSSZHashingAlgorithm()
|
||||
|
||||
// Initializes any forks here.
|
||||
@@ -241,13 +245,19 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
avs := das.NewCachingDBVerifiedStore(beacon.db)
|
||||
bcOpts := []blockchain.Option{
|
||||
blockchain.WithForkChoiceStore(beacon.forkChoicer),
|
||||
blockchain.WithClockSynchronizer(synchronizer),
|
||||
blockchain.WithSyncComplete(beacon.initialSyncComplete),
|
||||
blockchain.WithAvailabilityStore(avs),
|
||||
}
|
||||
if err := beacon.registerBlockchainService(bcOpts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Initial Sync Service")
|
||||
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
|
||||
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete, avs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -268,6 +278,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
|
||||
log.Debugln("Registering RPC Service")
|
||||
router := mux.NewRouter()
|
||||
router.Use(middleware)
|
||||
if err := beacon.registerRPCService(router); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -602,7 +613,8 @@ func (b *BeaconNode) registerAttestationPool() error {
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer, syncComplete chan struct{}) error {
|
||||
func (b *BeaconNode) registerBlockchainService(required []blockchain.Option) error {
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
var web3Service *execution.Service
|
||||
if err := b.services.FetchService(&web3Service); err != nil {
|
||||
return err
|
||||
@@ -613,10 +625,9 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
return err
|
||||
}
|
||||
|
||||
opts := append(b.serviceFlagOpts.blockchainFlagOpts, required...)
|
||||
// skipcq: CRT-D0001
|
||||
opts := append(
|
||||
b.serviceFlagOpts.blockchainFlagOpts,
|
||||
blockchain.WithForkChoiceStore(fc),
|
||||
opts = append(opts,
|
||||
blockchain.WithDatabase(b.db),
|
||||
blockchain.WithDepositCache(b.depositCache),
|
||||
blockchain.WithChainStartFetcher(web3Service),
|
||||
@@ -632,8 +643,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
|
||||
blockchain.WithProposerIdsCache(b.proposerIdsCache),
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -715,7 +724,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
func (b *BeaconNode) registerInitialSyncService(complete chan struct{}, avs das.AvailabilityStore) error {
|
||||
var chainService *blockchain.Service
|
||||
if err := b.services.FetchService(&chainService); err != nil {
|
||||
return err
|
||||
@@ -729,6 +738,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
BlockNotifier: b,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
AVS: avs,
|
||||
})
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
|
||||
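The hunk above replaces the positional arguments of registerBlockchainService with a slice of required blockchain.Option values, which New appends to the CLI-derived flag options. A minimal, self-contained sketch of that functional-options composition follows; the option and field names here are illustrative stand-ins, not the actual Prysm API.

package main

import "fmt"

// config mimics the shape of a service configuration assembled from options.
type config struct {
	db  string
	avs string
}

// Option is the usual functional-option signature.
type Option func(*config)

func WithDatabase(db string) Option           { return func(c *config) { c.db = db } }
func WithAvailabilityStore(avs string) Option { return func(c *config) { c.avs = avs } }

// newService applies flag-derived options first, then the required ones,
// so the required wiring cannot be lost no matter what the flags contain.
func newService(opts ...Option) *config {
	c := &config{}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	flagOpts := []Option{WithDatabase("bolt")}
	required := []Option{WithAvailabilityStore("caching-db-verified")}
	svc := newService(append(flagOpts, required...)...)
	fmt.Printf("%+v\n", *svc) // {db:bolt avs:caching-db-verified}
}
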
17
beacon-chain/node/router.go
Normal file
@@ -0,0 +1,17 @@
package node

import (
"net/http"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
)

func middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
helpers.NormalizeQueryValues(query)
r.URL.RawQuery = query.Encode()

next.ServeHTTP(w, r)
})
}

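The new router.go above puts a query-normalizing middleware in front of the gorilla/mux router. The sketch below shows how middleware of this shape can be exercised with net/http/httptest; normalizeQuery is a stand-in for helpers.NormalizeQueryValues, whose exact behavior (dropping empty parameters) is assumed here rather than taken from the diff.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
)

// normalizeQuery is an assumed, simplified normalizer: it drops parameters
// that were supplied with an empty value.
func normalizeQuery(q url.Values) {
	for k, vals := range q {
		if len(vals) == 1 && vals[0] == "" {
			q.Del(k)
		}
	}
}

// middleware mirrors the shape of the handler wrapper shown in the diff.
func middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		normalizeQuery(q)
		r.URL.RawQuery = q.Encode()
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.URL.RawQuery) // echo the normalized query back
	}))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/eth/v1/node/peers?state=connected&direction=", nil))
	fmt.Println(rec.Body.String()) // state=connected
}
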
@@ -477,8 +477,10 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {

// Same test for block attestations
cache = NewAttCaches()
assert.NoError(t, cache.SaveBlockAttestations(tt.existing))

for _, att := range tt.existing {
require.NoError(t, cache.SaveBlockAttestation(att))
}
result, err = cache.HasAggregatedAttestation(tt.input)
require.NoError(t, err)
assert.Equal(t, tt.want, result)

@@ -36,17 +36,6 @@ func (c *AttCaches) SaveBlockAttestation(att *ethpb.Attestation) error {
return nil
}

// SaveBlockAttestations saves a list of block attestations in cache.
func (c *AttCaches) SaveBlockAttestations(atts []*ethpb.Attestation) error {
for _, att := range atts {
if err := c.SaveBlockAttestation(att); err != nil {
return err
}
}

return nil
}

// BlockAttestations returns the block attestations in cache.
func (c *AttCaches) BlockAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0)

@@ -32,7 +32,6 @@ type Pool interface {
UnaggregatedAttestationCount() int
// For attestations that were included in the block.
SaveBlockAttestation(att *ethpb.Attestation) error
SaveBlockAttestations(atts []*ethpb.Attestation) error
BlockAttestations() []*ethpb.Attestation
DeleteBlockAttestation(att *ethpb.Attestation) error
// For attestations to be passed to fork choice.

@@ -94,7 +94,9 @@ func TestBatchAttestations_Multiple(t *testing.T) {
}
require.NoError(t, s.cfg.Pool.SaveUnaggregatedAttestations(unaggregatedAtts))
require.NoError(t, s.cfg.Pool.SaveAggregatedAttestations(aggregatedAtts))
require.NoError(t, s.cfg.Pool.SaveBlockAttestations(blockAtts))
for _, att := range blockAtts {
require.NoError(t, s.cfg.Pool.SaveBlockAttestation(att))
}
require.NoError(t, s.batchForkChoiceAtts(context.Background()))

wanted, err := attaggregation.Aggregate([]*ethpb.Attestation{aggregatedAtts[0], blockAtts[0]})
@@ -148,7 +150,10 @@ func TestBatchAttestations_Single(t *testing.T) {
}
require.NoError(t, s.cfg.Pool.SaveUnaggregatedAttestations(unaggregatedAtts))
require.NoError(t, s.cfg.Pool.SaveAggregatedAttestations(aggregatedAtts))
require.NoError(t, s.cfg.Pool.SaveBlockAttestations(blockAtts))

for _, att := range blockAtts {
require.NoError(t, s.cfg.Pool.SaveBlockAttestation(att))
}
require.NoError(t, s.batchForkChoiceAtts(context.Background()))

wanted, err := attaggregation.Aggregate(append(aggregatedAtts, unaggregatedAtts...))

@@ -42,7 +42,9 @@ func TestPruneExpired_Ticker(t *testing.T) {
}
require.NoError(t, s.cfg.Pool.SaveAggregatedAttestations(atts))
assert.Equal(t, 2, s.cfg.Pool.AggregatedAttestationCount())
require.NoError(t, s.cfg.Pool.SaveBlockAttestations(atts))
for _, att := range atts {
require.NoError(t, s.cfg.Pool.SaveBlockAttestation(att))
}

// Rewind back one epoch worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
@@ -95,7 +97,9 @@ func TestPruneExpired_PruneExpiredAtts(t *testing.T) {
att4 := &ethpb.Attestation{Data: ad2, AggregationBits: bitfield.Bitlist{0b1110}}
atts := []*ethpb.Attestation{att1, att2, att3, att4}
require.NoError(t, s.cfg.Pool.SaveAggregatedAttestations(atts))
require.NoError(t, s.cfg.Pool.SaveBlockAttestations(atts))
for _, att := range atts {
require.NoError(t, s.cfg.Pool.SaveBlockAttestation(att))
}

// Rewind back one epoch worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))

@@ -16,7 +16,6 @@ go_library(
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
|
||||
@@ -70,19 +70,6 @@ func handleGetBlindedBeaconBlockSSZ(
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleSubmitBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
return handlePostSSZ(m, endpoint, w, req)
|
||||
}
|
||||
|
||||
func handleSubmitBlindedBlockSSZ(
|
||||
m *apimiddleware.ApiProxyMiddleware,
|
||||
endpoint apimiddleware.Endpoint,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (handled bool) {
|
||||
return handlePostSSZ(m, endpoint, w, req)
|
||||
}
|
||||
|
||||
func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "produce_beacon_block.ssz",
|
||||
@@ -111,11 +98,7 @@ func handleGetSSZ(
|
||||
req *http.Request,
|
||||
config sszConfig,
|
||||
) (handled bool) {
|
||||
ssz, err := http2.SszRequested(req)
|
||||
if err != nil {
|
||||
apimiddleware.WriteError(w, apimiddleware.InternalServerError(err), nil)
|
||||
return true
|
||||
}
|
||||
ssz := http2.SszRequested(req)
|
||||
if !ssz {
|
||||
return false
|
||||
}
|
||||
@@ -163,58 +146,6 @@ func handleGetSSZ(
|
||||
return true
|
||||
}
|
||||
|
||||
func handlePostSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
if !sszPosted(req) {
|
||||
return false
|
||||
}
|
||||
|
||||
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
prepareCustomHeaders(req)
|
||||
if errJson := preparePostedSSZData(req); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
|
||||
grpcResponse, errJson := m.ProxyRequest(req)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
if respHasError {
|
||||
return true
|
||||
}
|
||||
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func sszPosted(req *http.Request) bool {
|
||||
ct, ok := req.Header["Content-Type"]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if len(ct) != 1 {
|
||||
return false
|
||||
}
|
||||
return ct[0] == api.OctetStreamMediaType
|
||||
}
|
||||
|
||||
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
|
||||
req.URL.Scheme = "http"
|
||||
req.URL.Host = m.GatewayAddress
|
||||
@@ -230,14 +161,6 @@ func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint
|
||||
return nil
|
||||
}
|
||||
|
||||
func prepareCustomHeaders(req *http.Request) {
|
||||
ver := req.Header.Get(api.VersionHeader)
|
||||
if ver != "" {
|
||||
req.Header.Del(api.VersionHeader)
|
||||
req.Header.Add(grpc.WithPrefix(api.VersionHeader), ver)
|
||||
}
|
||||
}
|
||||
|
||||
func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
|
||||
buf, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
@@ -390,6 +313,8 @@ func receiveEvents(eventChan <-chan *sse.Event, w http.ResponseWriter, req *http
|
||||
default:
|
||||
return apimiddleware.InternalServerError(errors.New("payload version unsupported"))
|
||||
}
|
||||
case events.BlobSidecarTopic:
|
||||
data = &BlobSidecarJson{}
|
||||
case "error":
|
||||
data = &EventErrorJson{}
|
||||
default:
|
||||
|
||||
@@ -19,48 +19,6 @@ import (
|
||||
|
||||
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolBLSToExecutionChange
|
||||
// expects posting a top-level array. We make it more proto-friendly by wrapping it in a struct.
|
||||
func wrapBLSChangesArray(
|
||||
endpoint *apimiddleware.Endpoint,
|
||||
_ http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
if _, ok := endpoint.PostRequest.(*SubmitBLSToExecutionChangesRequest); !ok {
|
||||
return true, nil
|
||||
}
|
||||
changes := make([]*SignedBLSToExecutionChangeJson, 0)
|
||||
if err := json.NewDecoder(req.Body).Decode(&changes); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
|
||||
}
|
||||
j := &SubmitBLSToExecutionChangesRequest{Changes: changes}
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Some endpoints e.g. https://ethereum.github.io/beacon-apis/#/Validator/getAttesterDuties expect posting a top-level array of validator indices.
|
||||
// We make it more proto-friendly by wrapping it in a struct with an 'Index' field.
|
||||
func wrapValidatorIndicesArray(
|
||||
endpoint *apimiddleware.Endpoint,
|
||||
_ http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
if _, ok := endpoint.PostRequest.(*ValidatorIndicesJson); ok {
|
||||
indices := make([]string, 0)
|
||||
if err := json.NewDecoder(req.Body).Decode(&indices); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
|
||||
}
|
||||
j := &ValidatorIndicesJson{Index: indices}
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type v1alpha1SignedPhase0Block struct {
|
||||
Block *BeaconBlockJson `json:"block"` // tech debt on phase 0 called this block instead of "message"
|
||||
@@ -321,43 +279,6 @@ func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.Respo
|
||||
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
|
||||
}
|
||||
|
||||
type tempSyncCommitteesResponseJson struct {
|
||||
Data *tempSyncCommitteeValidatorsJson `json:"data"`
|
||||
}
|
||||
|
||||
type tempSyncCommitteeValidatorsJson struct {
|
||||
Validators []string `json:"validators"`
|
||||
ValidatorAggregates []*tempSyncSubcommitteeValidatorsJson `json:"validator_aggregates"`
|
||||
}
|
||||
|
||||
type tempSyncSubcommitteeValidatorsJson struct {
|
||||
Validators []string `json:"validators"`
|
||||
}
|
||||
|
||||
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.0.0#/Beacon/getEpochSyncCommittees returns validator_aggregates as a nested array.
|
||||
// grpc-gateway returns a struct with nested fields which we have to transform into a plain 2D array.
|
||||
func prepareValidatorAggregates(body []byte, responseContainer interface{}) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
tempContainer := &tempSyncCommitteesResponseJson{}
|
||||
if err := json.Unmarshal(body, tempContainer); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal response into temp container")
|
||||
}
|
||||
container, ok := responseContainer.(*SyncCommitteesResponseJson)
|
||||
if !ok {
|
||||
return false, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
|
||||
}
|
||||
|
||||
container.Data = &SyncCommitteeValidatorsJson{}
|
||||
container.Data.Validators = tempContainer.Data.Validators
|
||||
container.Data.ValidatorAggregates = make([][]string, len(tempContainer.Data.ValidatorAggregates))
|
||||
for i, srcValAgg := range tempContainer.Data.ValidatorAggregates {
|
||||
dstValAgg := make([]string, len(srcValAgg.Validators))
|
||||
copy(dstValAgg, tempContainer.Data.ValidatorAggregates[i].Validators)
|
||||
container.Data.ValidatorAggregates[i] = dstValAgg
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
type phase0BlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *SignedBeaconBlockJson `json:"data"`
|
||||
|
||||
@@ -3,7 +3,6 @@ package apimiddleware
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"strconv"
|
||||
@@ -19,87 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func TestWrapValidatorIndicesArray(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &ValidatorIndicesJson{},
|
||||
}
|
||||
unwrappedIndices := []string{"1", "2"}
|
||||
unwrappedIndicesJson, err := json.Marshal(unwrappedIndices)
|
||||
require.NoError(t, err)
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(unwrappedIndicesJson)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
runDefault, errJson := wrapValidatorIndicesArray(endpoint, nil, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
|
||||
wrappedIndices := &ValidatorIndicesJson{}
|
||||
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedIndices))
|
||||
require.Equal(t, 2, len(wrappedIndices.Index), "wrong number of wrapped items")
|
||||
assert.Equal(t, "1", wrappedIndices.Index[0])
|
||||
assert.Equal(t, "2", wrappedIndices.Index[1])
|
||||
})
|
||||
|
||||
t.Run("invalid_body", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &ValidatorIndicesJson{},
|
||||
}
|
||||
var body bytes.Buffer
|
||||
_, err := body.Write([]byte("invalid"))
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
runDefault, errJson := wrapValidatorIndicesArray(endpoint, nil, request)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestWrapBLSChangesArray(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &SubmitBLSToExecutionChangesRequest{},
|
||||
}
|
||||
unwrappedChanges := []*SignedBLSToExecutionChangeJson{{Signature: "sig"}}
|
||||
unwrappedChangesJson, err := json.Marshal(unwrappedChanges)
|
||||
require.NoError(t, err)
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(unwrappedChangesJson)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
runDefault, errJson := wrapBLSChangesArray(endpoint, nil, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
|
||||
wrappedChanges := &SubmitBLSToExecutionChangesRequest{}
|
||||
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedChanges))
|
||||
require.Equal(t, 1, len(wrappedChanges.Changes), "wrong number of wrapped items")
|
||||
assert.Equal(t, "sig", wrappedChanges.Changes[0].Signature)
|
||||
})
|
||||
|
||||
t.Run("invalid_body", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &SubmitBLSToExecutionChangesRequest{},
|
||||
}
|
||||
var body bytes.Buffer
|
||||
_, err := body.Write([]byte("invalid"))
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
runDefault, errJson := wrapBLSChangesArray(endpoint, nil, request)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -442,31 +360,6 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareValidatorAggregates(t *testing.T) {
|
||||
body := &tempSyncCommitteesResponseJson{
|
||||
Data: &tempSyncCommitteeValidatorsJson{
|
||||
Validators: []string{"1", "2"},
|
||||
ValidatorAggregates: []*tempSyncSubcommitteeValidatorsJson{
|
||||
{
|
||||
Validators: []string{"3", "4"},
|
||||
},
|
||||
{
|
||||
Validators: []string{"5"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
bodyJson, err := json.Marshal(body)
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &SyncCommitteesResponseJson{}
|
||||
runDefault, errJson := prepareValidatorAggregates(bodyJson, container)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
assert.DeepEqual(t, []string{"1", "2"}, container.Data.Validators)
|
||||
require.DeepEqual(t, [][]string{{"3", "4"}, {"5"}}, container.Data.ValidatorAggregates)
|
||||
}
|
||||
|
||||
func TestSerializeV2Block(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &BlockV2ResponseJson{
|
||||
|
||||
@@ -16,19 +16,6 @@ func (f *BeaconEndpointFactory) IsNil() bool {
|
||||
// Paths is a collection of all valid beacon chain API paths.
|
||||
func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
return []string{
|
||||
"/eth/v1/beacon/genesis",
|
||||
"/eth/v1/beacon/states/{state_id}/root",
|
||||
"/eth/v1/beacon/states/{state_id}/fork",
|
||||
"/eth/v1/beacon/states/{state_id}/finality_checkpoints",
|
||||
"/eth/v1/beacon/states/{state_id}/validators",
|
||||
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}",
|
||||
"/eth/v1/beacon/states/{state_id}/validator_balances",
|
||||
"/eth/v1/beacon/states/{state_id}/committees",
|
||||
"/eth/v1/beacon/states/{state_id}/sync_committees",
|
||||
"/eth/v1/beacon/states/{state_id}/randao",
|
||||
"/eth/v1/beacon/headers",
|
||||
"/eth/v1/beacon/headers/{block_id}",
|
||||
"/eth/v1/beacon/blocks",
|
||||
"/eth/v1/beacon/blinded_blocks",
|
||||
"/eth/v1/beacon/blocks/{block_id}",
|
||||
"/eth/v2/beacon/blocks/{block_id}",
|
||||
@@ -36,31 +23,18 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
"/eth/v1/beacon/blinded_blocks/{block_id}",
|
||||
"/eth/v1/beacon/pool/attester_slashings",
|
||||
"/eth/v1/beacon/pool/proposer_slashings",
|
||||
"/eth/v1/beacon/pool/bls_to_execution_changes",
|
||||
"/eth/v1/beacon/pool/bls_to_execution_changes",
|
||||
"/eth/v1/beacon/weak_subjectivity",
|
||||
"/eth/v1/node/identity",
|
||||
"/eth/v1/node/peers",
|
||||
"/eth/v1/node/peers/{peer_id}",
|
||||
"/eth/v1/node/peer_count",
|
||||
"/eth/v1/node/version",
|
||||
"/eth/v1/node/health",
|
||||
"/eth/v1/debug/beacon/states/{state_id}",
|
||||
"/eth/v2/debug/beacon/states/{state_id}",
|
||||
"/eth/v1/debug/beacon/heads",
|
||||
"/eth/v2/debug/beacon/heads",
|
||||
"/eth/v1/debug/fork_choice",
|
||||
"/eth/v1/config/fork_schedule",
|
||||
"/eth/v1/config/deposit_contract",
|
||||
"/eth/v1/config/spec",
|
||||
"/eth/v1/events",
|
||||
"/eth/v1/validator/duties/attester/{epoch}",
|
||||
"/eth/v1/validator/duties/proposer/{epoch}",
|
||||
"/eth/v1/validator/duties/sync/{epoch}",
|
||||
"/eth/v1/validator/blocks/{slot}",
|
||||
"/eth/v2/validator/blocks/{slot}",
|
||||
"/eth/v1/validator/blinded_blocks/{slot}",
|
||||
"/eth/v1/validator/liveness/{epoch}",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,51 +42,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, error) {
|
||||
endpoint := apimiddleware.DefaultEndpoint()
|
||||
switch path {
|
||||
case "/eth/v1/beacon/genesis":
|
||||
endpoint.GetResponse = &GenesisResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/root":
|
||||
endpoint.GetResponse = &StateRootResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/fork":
|
||||
endpoint.GetResponse = &StateForkResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/finality_checkpoints":
|
||||
endpoint.GetResponse = &StateFinalityCheckpointResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/validators":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "id", Hex: true}, {Name: "status", Enum: true}}
|
||||
endpoint.GetResponse = &StateValidatorsResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/validators/{validator_id}":
|
||||
endpoint.GetResponse = &StateValidatorResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/validator_balances":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "id", Hex: true}}
|
||||
endpoint.GetResponse = &ValidatorBalancesResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/committees":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "epoch"}, {Name: "index"}, {Name: "slot"}}
|
||||
endpoint.GetResponse = &StateCommitteesResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/sync_committees":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "epoch"}}
|
||||
endpoint.GetResponse = &SyncCommitteesResponseJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeGrpcResponseBodyIntoContainer: prepareValidatorAggregates,
|
||||
}
|
||||
case "/eth/v1/beacon/states/{state_id}/randao":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "epoch"}}
|
||||
endpoint.GetResponse = &RandaoResponseJson{}
|
||||
case "/eth/v1/beacon/headers":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "parent_root", Hex: true}}
|
||||
endpoint.GetResponse = &BlockHeadersResponseJson{}
|
||||
case "/eth/v1/beacon/headers/{block_id}":
|
||||
endpoint.GetResponse = &BlockHeaderResponseJson{}
|
||||
case "/eth/v1/beacon/blocks":
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlockPostRequest,
|
||||
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlock,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlockSSZ}
|
||||
case "/eth/v1/beacon/blinded_blocks":
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlindedBlockPostRequest,
|
||||
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlindedBlock,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlindedBlockSSZ}
|
||||
case "/eth/v1/beacon/blocks/{block_id}":
|
||||
endpoint.GetResponse = &BlockResponseJson{}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}
|
||||
@@ -136,29 +65,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
|
||||
case "/eth/v1/beacon/pool/proposer_slashings":
|
||||
endpoint.PostRequest = &ProposerSlashingJson{}
|
||||
endpoint.GetResponse = &ProposerSlashingsPoolResponseJson{}
|
||||
case "/eth/v1/beacon/pool/bls_to_execution_changes":
|
||||
endpoint.PostRequest = &SubmitBLSToExecutionChangesRequest{}
|
||||
endpoint.GetResponse = &BLSToExecutionChangesPoolResponseJson{}
|
||||
endpoint.Err = &IndexedVerificationFailureErrorJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: wrapBLSChangesArray,
|
||||
}
|
||||
case "/eth/v1/beacon/weak_subjectivity":
|
||||
endpoint.GetResponse = &WeakSubjectivityResponse{}
|
||||
case "/eth/v1/node/identity":
|
||||
endpoint.GetResponse = &IdentityResponseJson{}
|
||||
case "/eth/v1/node/peers":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "state", Enum: true}, {Name: "direction", Enum: true}}
|
||||
endpoint.GetResponse = &PeersResponseJson{}
|
||||
case "/eth/v1/node/peers/{peer_id}":
|
||||
endpoint.RequestURLLiterals = []string{"peer_id"}
|
||||
endpoint.GetResponse = &PeerResponseJson{}
|
||||
case "/eth/v1/node/peer_count":
|
||||
endpoint.GetResponse = &PeerCountResponseJson{}
|
||||
case "/eth/v1/node/version":
|
||||
endpoint.GetResponse = &VersionResponseJson{}
|
||||
case "/eth/v1/node/health":
|
||||
// Use default endpoint
|
||||
case "/eth/v1/debug/beacon/states/{state_id}":
|
||||
endpoint.GetResponse = &BeaconStateResponseJson{}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconStateSSZ}
|
||||
@@ -179,32 +87,10 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
|
||||
}
|
||||
case "/eth/v1/config/fork_schedule":
|
||||
endpoint.GetResponse = &ForkScheduleResponseJson{}
|
||||
case "/eth/v1/config/deposit_contract":
|
||||
endpoint.GetResponse = &DepositContractResponseJson{}
|
||||
case "/eth/v1/config/spec":
|
||||
endpoint.GetResponse = &SpecResponseJson{}
|
||||
case "/eth/v1/events":
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleEvents}
|
||||
case "/eth/v1/validator/duties/attester/{epoch}":
|
||||
endpoint.PostRequest = &ValidatorIndicesJson{}
|
||||
endpoint.PostResponse = &AttesterDutiesResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"epoch"}
|
||||
endpoint.Err = &NodeSyncDetailsErrorJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: wrapValidatorIndicesArray,
|
||||
}
|
||||
case "/eth/v1/validator/duties/proposer/{epoch}":
|
||||
endpoint.GetResponse = &ProposerDutiesResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"epoch"}
|
||||
endpoint.Err = &NodeSyncDetailsErrorJson{}
|
||||
case "/eth/v1/validator/duties/sync/{epoch}":
|
||||
endpoint.PostRequest = &ValidatorIndicesJson{}
|
||||
endpoint.PostResponse = &SyncCommitteeDutiesResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"epoch"}
|
||||
endpoint.Err = &NodeSyncDetailsErrorJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: wrapValidatorIndicesArray,
|
||||
}
|
||||
case "/eth/v1/validator/blocks/{slot}":
|
||||
endpoint.GetResponse = &ProduceBlockResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"slot"}
|
||||
@@ -225,14 +111,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
|
||||
case "/eth/v1/validator/liveness/{epoch}":
|
||||
endpoint.PostRequest = &ValidatorIndicesJson{}
|
||||
endpoint.PostResponse = &LivenessResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"epoch"}
|
||||
endpoint.Err = &NodeSyncDetailsErrorJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: wrapValidatorIndicesArray,
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("invalid path")
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
)
|
||||
|
||||
@@ -12,16 +11,6 @@ import (
|
||||
// Requests and responses.
|
||||
//----------------
|
||||
|
||||
type GenesisResponseJson struct {
|
||||
Data *GenesisResponse_GenesisJson `json:"data"`
|
||||
}
|
||||
|
||||
type GenesisResponse_GenesisJson struct {
|
||||
GenesisTime string `json:"genesis_time" time:"true"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
GenesisForkVersion string `json:"genesis_fork_version" hex:"true"`
|
||||
}
|
||||
|
||||
// WeakSubjectivityResponse is used to marshal/unmarshal the response for the
|
||||
// /eth/v1/beacon/weak_subjectivity endpoint.
|
||||
type WeakSubjectivityResponse struct {
|
||||
@@ -31,84 +20,6 @@ type WeakSubjectivityResponse struct {
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type StateRootResponseJson struct {
|
||||
Data *StateRootResponse_StateRootJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateRootResponse_StateRootJson struct {
|
||||
StateRoot string `json:"root" hex:"true"`
|
||||
}
|
||||
|
||||
type StateForkResponseJson struct {
|
||||
Data *ForkJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateFinalityCheckpointResponseJson struct {
|
||||
Data *StateFinalityCheckpointResponse_StateFinalityCheckpointJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateFinalityCheckpointResponse_StateFinalityCheckpointJson struct {
|
||||
PreviousJustified *CheckpointJson `json:"previous_justified"`
|
||||
CurrentJustified *CheckpointJson `json:"current_justified"`
|
||||
Finalized *CheckpointJson `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateValidatorsResponseJson struct {
|
||||
Data []*ValidatorContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateValidatorResponseJson struct {
|
||||
Data *ValidatorContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type ValidatorBalancesResponseJson struct {
|
||||
Data []*ValidatorBalanceJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateCommitteesResponseJson struct {
|
||||
Data []*CommitteeJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type SyncCommitteesResponseJson struct {
|
||||
Data *SyncCommitteeValidatorsJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type RandaoResponseJson struct {
|
||||
Data *struct {
|
||||
Randao string `json:"randao" hex:"true"`
|
||||
} `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockHeadersResponseJson struct {
|
||||
Data []*BlockHeaderContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockHeaderResponseJson struct {
|
||||
Data *BlockHeaderContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockResponseJson struct {
|
||||
Data *SignedBeaconBlockJson `json:"data"`
|
||||
}
|
||||
@@ -147,41 +58,6 @@ type ProposerSlashingsPoolResponseJson struct {
|
||||
Data []*ProposerSlashingJson `json:"data"`
|
||||
}
|
||||
|
||||
type BLSToExecutionChangesPoolResponseJson struct {
|
||||
Data []*SignedBLSToExecutionChangeJson `json:"data"`
|
||||
}
|
||||
|
||||
type IdentityResponseJson struct {
|
||||
Data *IdentityJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeersResponseJson struct {
|
||||
Data []*PeerJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeerResponseJson struct {
|
||||
Data *PeerJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeerCountResponseJson struct {
|
||||
Data PeerCountResponse_PeerCountJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeerCountResponse_PeerCountJson struct {
|
||||
Disconnected string `json:"disconnected"`
|
||||
Connecting string `json:"connecting"`
|
||||
Connected string `json:"connected"`
|
||||
Disconnecting string `json:"disconnecting"`
|
||||
}
|
||||
|
||||
type VersionResponseJson struct {
|
||||
Data *VersionJson `json:"data"`
|
||||
}
|
||||
|
||||
type SyncingResponseJson struct {
|
||||
Data *shared.SyncDetails `json:"data"`
|
||||
}
|
||||
|
||||
type BeaconStateResponseJson struct {
|
||||
Data *BeaconStateJson `json:"data"`
|
||||
}
|
||||
@@ -213,27 +89,6 @@ type SpecResponseJson struct {
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
type ValidatorIndicesJson struct {
|
||||
Index []string `json:"index"`
|
||||
}
|
||||
|
||||
type AttesterDutiesResponseJson struct {
|
||||
DependentRoot string `json:"dependent_root" hex:"true"`
|
||||
Data []*AttesterDutyJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
type ProposerDutiesResponseJson struct {
|
||||
DependentRoot string `json:"dependent_root" hex:"true"`
|
||||
Data []*ProposerDutyJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
type SyncCommitteeDutiesResponseJson struct {
|
||||
Data []*SyncCommitteeDuty `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
type ProduceBlockResponseJson struct {
|
||||
Data *BeaconBlockJson `json:"data"`
|
||||
}
|
||||
@@ -300,13 +155,6 @@ type ForkChoiceResponseExtraDataJson struct {
|
||||
HeadRoot string `json:"head_root" hex:"true"`
|
||||
}
|
||||
|
||||
type LivenessResponseJson struct {
|
||||
Data []*struct {
|
||||
Index string `json:"index"`
|
||||
IsLive bool `json:"is_live"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
//----------------
|
||||
// Reusable types.
|
||||
//----------------
|
||||
@@ -740,17 +588,6 @@ type SyncAggregateJson struct {
|
||||
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
|
||||
}
|
||||
|
||||
type BlockHeaderContainerJson struct {
|
||||
Root string `json:"root" hex:"true"`
|
||||
Canonical bool `json:"canonical"`
|
||||
Header *BeaconBlockHeaderContainerJson `json:"header"`
|
||||
}
|
||||
|
||||
type BeaconBlockHeaderContainerJson struct {
|
||||
Message *BeaconBlockHeaderJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeaderJson struct {
|
||||
Header *BeaconBlockHeaderJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
@@ -811,10 +648,6 @@ type BLSToExecutionChangeJson struct {
|
||||
ToExecutionAddress string `json:"to_execution_address" hex:"true"`
|
||||
}
|
||||
|
||||
type SubmitBLSToExecutionChangesRequest struct {
|
||||
Changes []*SignedBLSToExecutionChangeJson `json:"changes"`
|
||||
}
|
||||
|
||||
type DepositJson struct {
|
||||
Proof []string `json:"proof" hex:"true"`
|
||||
Data *Deposit_DataJson `json:"data"`
|
||||
@@ -837,31 +670,6 @@ type VoluntaryExitJson struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
}
|
||||
|
||||
type IdentityJson struct {
|
||||
PeerId string `json:"peer_id"`
|
||||
Enr string `json:"enr"`
|
||||
P2PAddresses []string `json:"p2p_addresses"`
|
||||
DiscoveryAddresses []string `json:"discovery_addresses"`
|
||||
Metadata *MetadataJson `json:"metadata"`
|
||||
}
|
||||
|
||||
type MetadataJson struct {
|
||||
SeqNumber string `json:"seq_number"`
|
||||
Attnets string `json:"attnets" hex:"true"`
|
||||
}
|
||||
|
||||
type PeerJson struct {
|
||||
PeerId string `json:"peer_id"`
|
||||
Enr string `json:"enr"`
|
||||
Address string `json:"last_seen_p2p_address"`
|
||||
State string `json:"state" enum:"true"`
|
||||
Direction string `json:"direction" enum:"true"`
|
||||
}
|
||||
|
||||
type VersionJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
}
|
||||
|
||||
type WithdrawalJson struct {
|
||||
WithdrawalIndex string `json:"index"`
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
@@ -1024,13 +832,6 @@ type ForkJson struct {
|
||||
Epoch string `json:"epoch"`
|
||||
}
|
||||
|
||||
type ValidatorContainerJson struct {
|
||||
Index string `json:"index"`
|
||||
Balance string `json:"balance"`
|
||||
Status string `json:"status" enum:"true"`
|
||||
Validator *ValidatorJson `json:"validator"`
|
||||
}
|
||||
|
||||
type ValidatorJson struct {
|
||||
PublicKey string `json:"pubkey" hex:"true"`
|
||||
WithdrawalCredentials string `json:"withdrawal_credentials" hex:"true"`
|
||||
@@ -1042,27 +843,11 @@ type ValidatorJson struct {
|
||||
WithdrawableEpoch string `json:"withdrawable_epoch"`
|
||||
}
|
||||
|
||||
type ValidatorBalanceJson struct {
|
||||
Index string `json:"index"`
|
||||
Balance string `json:"balance"`
|
||||
}
|
||||
|
||||
type CommitteeJson struct {
|
||||
Index string `json:"index"`
|
||||
Slot string `json:"slot"`
|
||||
Validators []string `json:"validators"`
|
||||
}
|
||||
|
||||
type SyncCommitteeJson struct {
|
||||
Pubkeys []string `json:"pubkeys" hex:"true"`
|
||||
AggregatePubkey string `json:"aggregate_pubkey" hex:"true"`
|
||||
}
|
||||
|
||||
type SyncCommitteeValidatorsJson struct {
|
||||
Validators []string `json:"validators"`
|
||||
ValidatorAggregates [][]string `json:"validator_aggregates"`
|
||||
}
|
||||
|
||||
type PendingAttestationJson struct {
|
||||
AggregationBits string `json:"aggregation_bits" hex:"true"`
|
||||
Data *AttestationDataJson `json:"data"`
|
||||
@@ -1086,28 +871,6 @@ type DepositContractJson struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
|
||||
type AttesterDutyJson struct {
|
||||
Pubkey string `json:"pubkey" hex:"true"`
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
CommitteeIndex string `json:"committee_index"`
|
||||
CommitteeLength string `json:"committee_length"`
|
||||
CommitteesAtSlot string `json:"committees_at_slot"`
|
||||
ValidatorCommitteeIndex string `json:"validator_committee_index"`
|
||||
Slot string `json:"slot"`
|
||||
}
|
||||
|
||||
type ProposerDutyJson struct {
|
||||
Pubkey string `json:"pubkey" hex:"true"`
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
Slot string `json:"slot"`
|
||||
}
|
||||
|
||||
type SyncCommitteeDuty struct {
|
||||
Pubkey string `json:"pubkey" hex:"true"`
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
ValidatorSyncCommitteeIndices []string `json:"validator_sync_committee_indices"`
|
||||
}
|
||||
|
||||
type SignedAggregateAttestationAndProofJson struct {
|
||||
Message *AggregateAttestationAndProofJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
@@ -1335,11 +1098,6 @@ type SingleIndexedVerificationFailureJson struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type NodeSyncDetailsErrorJson struct {
|
||||
apimiddleware.DefaultErrorJson
|
||||
SyncDetails shared.SyncDetails `json:"sync_details"`
|
||||
}
|
||||
|
||||
type EventErrorJson struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
Message string `json:"message"`
|
||||
|
||||
@@ -459,12 +459,12 @@ func (s *Service) GetAttestationData(

// SubmitSyncMessage submits the sync committee message to the network.
// It also saves the sync committee message into the pending pool for block inclusion.
func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) error {
func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) *RpcError {
errs, ctx := errgroup.WithContext(ctx)

headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, msg.Slot)
if err != nil {
return err
return &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get head sync committee indices")}
}
// Broadcasting and saving message into the pool in parallel. As one fail should not affect another.
// This broadcasts for all subnets.
@@ -477,9 +477,12 @@ func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitte
}

if err := s.SyncCommitteePool.SaveSyncCommitteeMessage(msg); err != nil {
return err
return &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not save sync committee message")}
}

// Wait for p2p broadcast to complete and return the first error (if any)
return errs.Wait()
if err = errs.Wait(); err != nil {
return &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not broadcast sync committee message")}
}
return nil
}

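The hunk above changes SubmitSyncMessage to return a *RpcError carrying a Reason instead of a bare error, which lets transport handlers translate the failure into a status code. A rough sketch of that mapping is below; the Reason values and field names are assumptions modeled on the snippet, not the exact Prysm core types.

package main

import (
	"errors"
	"fmt"
)

type Reason int

const (
	Internal Reason = iota
	BadRequest
	Unavailable
)

// RpcError pairs a machine-readable reason with the underlying error.
type RpcError struct {
	Reason Reason
	Err    error
}

// httpStatus maps a reason to an HTTP status at the handler boundary.
func httpStatus(e *RpcError) int {
	switch e.Reason {
	case BadRequest:
		return 400
	case Unavailable:
		return 503
	default:
		return 500
	}
}

func main() {
	rpcErr := &RpcError{Reason: Internal, Err: errors.New("could not save sync committee message")}
	fmt.Println(httpStatus(rpcErr), rpcErr.Err) // 500 could not save sync committee message
}
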
@@ -8,19 +8,17 @@ go_library(
|
||||
"config.go",
|
||||
"handlers.go",
|
||||
"handlers_pool.go",
|
||||
"handlers_state.go",
|
||||
"handlers_validator.go",
|
||||
"log.go",
|
||||
"pool.go",
|
||||
"server.go",
|
||||
"state.go",
|
||||
"structs.go",
|
||||
"sync_committee.go",
|
||||
"validator.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
@@ -41,7 +39,6 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -56,7 +53,6 @@ go_library(
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
@@ -66,18 +62,16 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_go_playground_validator_v10//:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes/empty",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -88,13 +82,12 @@ go_test(
|
||||
"blocks_test.go",
|
||||
"config_test.go",
|
||||
"handlers_pool_test.go",
|
||||
"handlers_state_test.go",
|
||||
"handlers_test.go",
|
||||
"handlers_validators_test.go",
|
||||
"init_test.go",
|
||||
"pool_test.go",
|
||||
"server_test.go",
|
||||
"state_test.go",
|
||||
"sync_committee_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -115,7 +108,6 @@ go_test(
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
@@ -128,7 +120,6 @@ go_test(
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
@@ -141,6 +132,7 @@ go_test(
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/mock:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
@@ -155,9 +147,6 @@ go_test(
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_stretchr_testify//mock:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,30 +2,21 @@ package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// GetBlindedBlock retrieves blinded block for given block id.
|
||||
@@ -34,7 +25,7 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -100,7 +91,7 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -158,217 +149,6 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
|
||||
// and publish a `ReadOnlySignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
|
||||
// The beacon node should broadcast a newly constructed `ReadOnlySignedBeaconBlock` to the beacon network,
|
||||
// to be included in the beacon chain. The beacon node is not required to validate the signed
|
||||
// `ReadOnlyBeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
|
||||
// successful. The beacon node is expected to integrate the new block into its state, and
|
||||
// therefore validate the block internally, however blocks which fail the validation are still
|
||||
// broadcast but a different status code is returned (202).
|
||||
func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBlindedBeaconBlockContentsContainer) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch blkContainer := req.Message.(type) {
|
||||
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_DenebContents:
|
||||
if err := bs.submitBlindedDenebContents(ctx, blkContainer.DenebContents); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_CapellaBlock:
|
||||
if err := bs.submitBlindedCapellaBlock(ctx, blkContainer.CapellaBlock.Message, blkContainer.CapellaBlock.Signature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_BellatrixBlock:
|
||||
if err := bs.submitBlindedBellatrixBlock(ctx, blkContainer.BellatrixBlock.Message, blkContainer.BellatrixBlock.Signature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_Phase0Block:
|
||||
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block.Block, blkContainer.Phase0Block.Signature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_AltairBlock:
|
||||
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock.Message, blkContainer.AltairBlock.Signature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Unsupported block container type %T", blkContainer)
|
||||
}
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockSSZ instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `ReadOnlySignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `ReadOnlySignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `ReadOnlyBeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally; however, blocks that fail validation are still
// broadcast, but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
	ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
	defer span.End()

	if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
		// We simply return the error because it's already a gRPC error.
		return nil, err
	}

	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
	}
	ver := md.Get(api.VersionHeader)
	if len(ver) == 0 {
		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
	}
	schedule := forks.NewOrderedSchedule(params.BeaconConfig())
	forkVer, err := schedule.VersionForName(ver[0])
	if err != nil {
		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
	}
	unmarshaler, err := detect.FromForkVersion(forkVer)
	if err != nil {
		return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
	}

	switch forkVer {
	case bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion):
		blkContent := &ethpbv2.SignedBlindedBeaconBlockContentsDeneb{}
		if err := blkContent.UnmarshalSSZ(req.Data); err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.InvalidArgument, "Could not unmarshal ssz signed blinded block contents: %v", err)
		}
		blindedBlock, err := migration.BlindedDenebToV1Alpha1SignedBlock(blkContent.SignedBlindedBlock)
		if err != nil {
			return nil, status.Errorf(codes.InvalidArgument, "Could not convert signed blinded block to v1alpha1: %v", err)
		}
		block, err := blocks.NewSignedBeaconBlock(blindedBlock)
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not init block: %v", err)
		}
		b, err := block.PbBlindedDenebBlock()
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
		}
		_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
			Block: &eth.GenericSignedBeaconBlock_BlindedDeneb{
				BlindedDeneb: &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
					Block: b,
					Blobs: migration.SignedBlindedBlobsToV1Alpha1SignedBlindedBlobs(blkContent.SignedBlindedBlobSidecars),
				},
			},
		})
		if err != nil {
			if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
				return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
			}
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
		}
		return &emptypb.Empty{}, nil

	case bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion):
		block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
		}
		if !block.IsBlinded() {
			return nil, status.Error(codes.InvalidArgument, "Submitted block is not blinded")
		}
		b, err := block.PbBlindedCapellaBlock()
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
		}
		_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
			Block: &eth.GenericSignedBeaconBlock_BlindedCapella{
				BlindedCapella: b,
			},
		})
		if err != nil {
			if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
				return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
			}
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
		}
		return &emptypb.Empty{}, nil
	case bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion):
		block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
		}
		if !block.IsBlinded() {
			return nil, status.Error(codes.InvalidArgument, "Submitted block is not blinded")
		}
		b, err := block.PbBlindedBellatrixBlock()
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
		}
		_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
			Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{
				BlindedBellatrix: b,
			},
		})
		if err != nil {
			if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
				return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
			}
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
		}
		return &emptypb.Empty{}, nil
	case bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion):
		block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
		}
		b, err := block.PbAltairBlock()
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
		}
		_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
			Block: &eth.GenericSignedBeaconBlock_Altair{
				Altair: b,
			},
		})
		if err != nil {
			if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
				return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
			}
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
		}
		return &emptypb.Empty{}, nil
	case bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion):
		block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
		}
		b, err := block.PbPhase0Block()
		if err != nil {
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
		}
		_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
			Block: &eth.GenericSignedBeaconBlock_Phase0{
				Phase0: b,
			},
		})
		if err != nil {
			if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
				return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
			}
			return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
		}
		return &emptypb.Empty{}, nil
	default:
		return &emptypb.Empty{}, status.Errorf(codes.InvalidArgument, "Unsupported fork %s", string(forkVer[:]))
	}
}
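// Example (illustrative only; beaconClient is an assumed gRPC client stub for this
// service and sszBytes an assumed SSZ-encoded signed blinded block): because
// SubmitBlindedBlockSSZ reads the fork name from incoming metadata, a caller has to
// attach the api.VersionHeader key before making the call:
//
//	ctx = metadata.AppendToOutgoingContext(ctx, api.VersionHeader, "capella")
//	_, err := beaconClient.SubmitBlindedBlockSSZ(ctx, &ethpbv2.SSZContainer{Data: sszBytes})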
func getBlindedBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
	phase0Blk, err := blk.PbPhase0Block()
	if err != nil {
@@ -563,7 +343,7 @@ func (bs *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadO
	if blindedDenebBlk == nil {
		return nil, errNilBlock
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Block)
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Message)
	if err != nil {
		return nil, errors.Wrapf(err, "Could not convert beacon block")
	}
@@ -601,7 +381,7 @@ func (bs *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadO
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Block)
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Message)
	if err != nil {
		return nil, errors.Wrapf(err, "could not convert beacon block")
	}
@@ -783,7 +563,7 @@ func (bs *Server) getBlindedSSZBlockDeneb(ctx context.Context, blk interfaces.Re
	if blindedDenebBlk == nil {
		return nil, errNilBlock
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Block)
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Message)
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
@@ -825,7 +605,7 @@ func (bs *Server) getBlindedSSZBlockDeneb(ctx context.Context, blk interfaces.Re
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Block)
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Message)
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
@@ -848,73 +628,3 @@ func (bs *Server) getBlindedSSZBlockDeneb(ctx context.Context, blk interfaces.Re
	}
	return &ethpbv2.SSZContainer{Version: ethpbv2.Version_DENEB, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}

func (bs *Server) submitBlindedBellatrixBlock(ctx context.Context, blindedBellatrixBlk *ethpbv2.BlindedBeaconBlockBellatrix, sig []byte) error {
	b, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(&ethpbv2.SignedBlindedBeaconBlockBellatrix{
		Message:   blindedBellatrixBlk,
		Signature: sig,
	})
	if err != nil {
		return status.Errorf(codes.Internal, "Could not convert block: %v", err)
	}
	_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
		Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{
			BlindedBellatrix: b,
		},
	})
	if err != nil {
		if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
			return status.Error(codes.InvalidArgument, err.Error())
		}
		return status.Errorf(codes.Internal, "Could not propose blinded block: %v", err)
	}
	return nil
}

func (bs *Server) submitBlindedCapellaBlock(ctx context.Context, blindedCapellaBlk *ethpbv2.BlindedBeaconBlockCapella, sig []byte) error {
	b, err := migration.BlindedCapellaToV1Alpha1SignedBlock(&ethpbv2.SignedBlindedBeaconBlockCapella{
		Message:   blindedCapellaBlk,
		Signature: sig,
	})
	if err != nil {
		return status.Errorf(codes.Internal, "Could not convert block: %v", err)
	}
	_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
		Block: &eth.GenericSignedBeaconBlock_BlindedCapella{
			BlindedCapella: b,
		},
	})
	if err != nil {
		if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
			return status.Error(codes.InvalidArgument, err.Error())
		}
		return status.Errorf(codes.Internal, "Could not propose blinded block: %v", err)
	}
	return nil
}

func (bs *Server) submitBlindedDenebContents(ctx context.Context, blindedDenebContents *ethpbv2.SignedBlindedBeaconBlockContentsDeneb) error {
	blk, err := migration.BlindedDenebToV1Alpha1SignedBlock(&ethpbv2.SignedBlindedBeaconBlockDeneb{
		Message:   blindedDenebContents.SignedBlindedBlock.Message,
		Signature: blindedDenebContents.SignedBlindedBlock.Signature,
	})
	if err != nil {
		return status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
	}
	blobs := migration.SignedBlindedBlobsToV1Alpha1SignedBlindedBlobs(blindedDenebContents.SignedBlindedBlobSidecars)
	_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
		Block: &eth.GenericSignedBeaconBlock_BlindedDeneb{
			BlindedDeneb: &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
				Block: blk,
				Blobs: blobs,
			},
		},
	})
	if err != nil {
		if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
			return status.Error(codes.InvalidArgument, err.Error())
		}
		return status.Errorf(codes.Internal, "Could not propose blinded block: %v", err)
	}
	return nil
}

@@ -4,24 +4,17 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func TestServer_GetBlindedBlock(t *testing.T) {
|
||||
@@ -120,7 +113,7 @@ func TestServer_GetBlindedBlock(t *testing.T) {
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(b.Block)
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(b.Message)
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
@@ -347,273 +340,3 @@ func TestServer_GetBlindedBlockSSZ(t *testing.T) {
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlockAltair()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Bellatrix full", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBlindedBeaconBlockCapella()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Capella full", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlockCapella()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b, err := util.NewBlindedBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
// TODO: replace when deneb fork epoch is known
|
||||
b.SignedBlindedBlock.Message.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "deneb")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Deneb full", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b, err := util.NewBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
// TODO: replace when deneb fork epoch is known
|
||||
b.SignedBlock.Message.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "deneb")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mock.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := v1Server.SubmitBlindedBlockSSZ(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlock(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContentsContainer_Phase0Block{Phase0Block: ðpbv1.SignedBeaconBlock{}},
|
||||
}
|
||||
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContentsContainer_AltairBlock{AltairBlock: ðpbv2.SignedBeaconBlockAltair{}},
|
||||
}
|
||||
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContentsContainer_BellatrixBlock{BellatrixBlock: ðpbv2.SignedBlindedBeaconBlockBellatrix{}},
|
||||
}
|
||||
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContentsContainer_CapellaBlock{CapellaBlock: ðpbv2.SignedBlindedBeaconBlockCapella{}},
|
||||
}
|
||||
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContentsContainer_DenebContents{
|
||||
DenebContents: ðpbv2.SignedBlindedBeaconBlockContentsDeneb{
|
||||
SignedBlindedBlock: ðpbv2.SignedBlindedBeaconBlockDeneb{},
|
||||
SignedBlindedBlobSidecars: []*ethpbv2.SignedBlindedBlobSidecar{},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mock.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := v1Server.SubmitBlindedBlock(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3,27 +3,18 @@ package beacon
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -31,7 +22,6 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -80,326 +70,6 @@ func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*eth
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBlockHeader retrieves block header for given block id.
|
||||
func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockHeaderResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockHeader")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1alpha1Header, err := blk.Header()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get block header from block: %v", err)
|
||||
}
|
||||
header := migration.V1Alpha1SignedHeaderToV1(v1alpha1Header)
|
||||
headerRoot, err := header.Message.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash block header: %v", err)
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash block: %v", err)
|
||||
}
|
||||
canonical, err := bs.ChainInfoFetcher.IsCanonical(ctx, blkRoot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
|
||||
}
|
||||
|
||||
return ðpbv1.BlockHeaderResponse{
|
||||
Data: ðpbv1.BlockHeaderContainer{
|
||||
Root: headerRoot[:],
|
||||
Canonical: canonical,
|
||||
Header: ðpbv1.BeaconBlockHeaderContainer{
|
||||
Message: header.Message,
|
||||
Signature: header.Signature,
|
||||
},
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: bs.FinalizationFetcher.IsFinalized(ctx, blkRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListBlockHeaders retrieves block headers matching given query. By default it will fetch current head slot blocks.
|
||||
func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeadersRequest) (*ethpbv1.BlockHeadersResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListBlockHeaders")
|
||||
defer span.End()
|
||||
|
||||
var err error
|
||||
var blks []interfaces.ReadOnlySignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
if len(req.ParentRoot) == 32 {
|
||||
blks, blkRoots, err = bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetParentRoot(req.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks: %v", err)
|
||||
}
|
||||
} else {
|
||||
slot := bs.ChainInfoFetcher.HeadSlot()
|
||||
if req.Slot != nil {
|
||||
slot = *req.Slot
|
||||
}
|
||||
blks, err = bs.BeaconDB.BlocksBySlot(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", req.Slot, err)
|
||||
}
|
||||
_, blkRoots, err = bs.BeaconDB.BlockRootsBySlot(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve block roots for slot %d: %v", req.Slot, err)
|
||||
}
|
||||
}
|
||||
if len(blks) == 0 {
|
||||
return nil, status.Error(codes.NotFound, "Could not find requested blocks")
|
||||
}
|
||||
|
||||
isOptimistic := false
|
||||
isFinalized := true
|
||||
blkHdrs := make([]*ethpbv1.BlockHeaderContainer, len(blks))
|
||||
for i, bl := range blks {
|
||||
v1alpha1Header, err := bl.Header()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get block header from block: %v", err)
|
||||
}
|
||||
header := migration.V1Alpha1SignedHeaderToV1(v1alpha1Header)
|
||||
headerRoot, err := header.Message.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash block header: %v", err)
|
||||
}
|
||||
canonical, err := bs.ChainInfoFetcher.IsCanonical(ctx, blkRoots[i])
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
|
||||
}
|
||||
if !isOptimistic {
|
||||
isOptimistic, err = bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
|
||||
}
|
||||
}
|
||||
if isFinalized {
|
||||
isFinalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoots[i])
|
||||
}
|
||||
blkHdrs[i] = ðpbv1.BlockHeaderContainer{
|
||||
Root: headerRoot[:],
|
||||
Canonical: canonical,
|
||||
Header: ðpbv1.BeaconBlockHeaderContainer{
|
||||
Message: header.Message,
|
||||
Signature: header.Signature,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return ðpbv1.BlockHeadersResponse{Data: blkHdrs, ExecutionOptimistic: isOptimistic, Finalized: isFinalized}, nil
|
||||
}
|
||||
|
||||
// SubmitBlock instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be
// included in the beacon chain. The beacon node is not required to validate the signed ReadOnlyBeaconBlock, and a successful
// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
// new block into its state, and therefore validate the block internally; however, blocks that fail validation are
// still broadcast, but a different status code is returned (202).
func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBlockContentsContainer) (*emptypb.Empty, error) {
	ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
	defer span.End()

	if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
		// We simply return the error because it's already a gRPC error.
		return nil, err
	}

	switch blkContainer := req.Message.(type) {
	case *ethpbv2.SignedBeaconBlockContentsContainer_Phase0Block:
		if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block.Block, blkContainer.Phase0Block.Signature); err != nil {
			return nil, err
		}
	case *ethpbv2.SignedBeaconBlockContentsContainer_AltairBlock:
		if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock.Message, blkContainer.AltairBlock.Signature); err != nil {
			return nil, err
		}
	case *ethpbv2.SignedBeaconBlockContentsContainer_BellatrixBlock:
		if err := bs.submitBellatrixBlock(ctx, blkContainer.BellatrixBlock.Message, blkContainer.BellatrixBlock.Signature); err != nil {
			return nil, err
		}
	case *ethpbv2.SignedBeaconBlockContentsContainer_CapellaBlock:
		if err := bs.submitCapellaBlock(ctx, blkContainer.CapellaBlock.Message, blkContainer.CapellaBlock.Signature); err != nil {
			return nil, err
		}
	case *ethpbv2.SignedBeaconBlockContentsContainer_DenebContents:
		if err := bs.submitDenebContents(ctx, blkContainer.DenebContents); err != nil {
			return nil, err
		}
	default:
		return nil, status.Errorf(codes.InvalidArgument, "Unsupported block container type %T", blkContainer)
	}

	return &emptypb.Empty{}, nil
}

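// Example (illustrative only; signedCapella is an assumed, previously built
// *ethpbv2.SignedBeaconBlockCapella): SubmitBlock takes the same kind of oneof
// container as the blinded endpoint, but wrapping full, unblinded block types:
//
//	req := &ethpbv2.SignedBeaconBlockContentsContainer{
//		Message: &ethpbv2.SignedBeaconBlockContentsContainer_CapellaBlock{
//			CapellaBlock: signedCapella,
//		},
//	}
//	_, err := bs.SubmitBlock(ctx, req)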
// SubmitBlockSSZ instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be
|
||||
// included in the beacon chain. The beacon node is not required to validate the signed ReadOnlyBeaconBlock, and a successful
|
||||
// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
|
||||
// new block into its state, and therefore validate the block internally, however blocks which fail the validation are
|
||||
// still broadcast but a different status code is returned (202).
|
||||
//
|
||||
// The provided block must be SSZ-serialized.
|
||||
func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
|
||||
}
|
||||
ver := md.Get(api.VersionHeader)
|
||||
if len(ver) == 0 {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
|
||||
}
|
||||
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
forkVer, err := schedule.VersionForName(ver[0])
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
|
||||
}
|
||||
unmarshaler, err := detect.FromForkVersion(forkVer)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
|
||||
}
|
||||
|
||||
switch forkVer {
|
||||
case bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion):
|
||||
blkContent := ðpbv2.SignedBeaconBlockContentsDeneb{}
|
||||
if err := blkContent.UnmarshalSSZ(req.Data); err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal ssz block contents: %v", err)
|
||||
}
|
||||
v1block, err := migration.DenebToV1Alpha1SignedBlock(blkContent.SignedBlock)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Submitted block is not valid: %v", err)
|
||||
}
|
||||
block, err := blocks.NewSignedBeaconBlock(v1block)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not init block: %v", err)
|
||||
}
|
||||
b, err := block.PbDenebBlock()
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Deneb{
|
||||
Deneb: ð.SignedBeaconBlockAndBlobsDeneb{
|
||||
Block: b,
|
||||
Blobs: migration.SignedBlobsToV1Alpha1SignedBlobs(blkContent.SignedBlobSidecars),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return &emptypb.Empty{}, nil
|
||||
case bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion):
|
||||
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
|
||||
}
|
||||
if block.IsBlinded() {
|
||||
return nil, status.Error(codes.InvalidArgument, "Submitted block is blinded")
|
||||
}
|
||||
b, err := block.PbCapellaBlock()
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Capella{
|
||||
Capella: b,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return &emptypb.Empty{}, nil
|
||||
case bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion):
|
||||
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
|
||||
}
|
||||
if block.IsBlinded() {
|
||||
return nil, status.Error(codes.InvalidArgument, "Submitted block is blinded")
|
||||
}
|
||||
b, err := block.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Bellatrix{
|
||||
Bellatrix: b,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return &emptypb.Empty{}, nil
|
||||
case bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion):
|
||||
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
|
||||
}
|
||||
b, err := block.PbAltairBlock()
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Altair{
|
||||
Altair: b,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return &emptypb.Empty{}, nil
|
||||
case bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion):
|
||||
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
|
||||
}
|
||||
b, err := block.PbPhase0Block()
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Phase0{
|
||||
Phase0: b,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return &emptypb.Empty{}, nil
|
||||
default:
|
||||
return &emptypb.Empty{}, status.Errorf(codes.InvalidArgument, "Unsupported fork %s", string(forkVer[:]))
|
||||
}
|
||||
}
|
||||
|
||||
// GetBlock retrieves block details for given block ID.
|
||||
// DEPRECATED: please use GetBlockV2 instead
|
||||
func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
|
||||
@@ -407,7 +77,7 @@ func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*eth
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -431,7 +101,7 @@ func (bs *Server) GetBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -453,7 +123,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -521,7 +191,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -586,7 +256,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
err = rpchelpers.HandleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -612,19 +282,6 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
|
||||
}, nil
|
||||
}
|
||||
|
||||
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) error {
|
||||
if invalidBlockIdErr, ok := err.(*lookup.BlockIdParseError); ok {
|
||||
return status.Errorf(codes.InvalidArgument, "Invalid block ID: %v", invalidBlockIdErr)
|
||||
}
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not get block from block ID: %v", err)
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
return status.Errorf(codes.NotFound, "Could not find requested block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
phase0Blk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
@@ -1150,105 +807,3 @@ func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlyS
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_DENEB, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
|
||||
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(ðpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.InvalidArgument, "Could not convert block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Phase0{
|
||||
Phase0: v1alpha1Blk,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bs *Server) submitAltairBlock(ctx context.Context, altairBlk *ethpbv2.BeaconBlockAltair, sig []byte) error {
|
||||
v1alpha1Blk, err := migration.AltairToV1Alpha1SignedBlock(ðpbv2.SignedBeaconBlockAltair{Message: altairBlk, Signature: sig})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.InvalidArgument, "Could not convert block: %v", err)
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Altair{
|
||||
Altair: v1alpha1Blk,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bs *Server) submitBellatrixBlock(ctx context.Context, bellatrixBlk *ethpbv2.BeaconBlockBellatrix, sig []byte) error {
|
||||
v1alpha1Blk, err := migration.BellatrixToV1Alpha1SignedBlock(ðpbv2.SignedBeaconBlockBellatrix{Message: bellatrixBlk, Signature: sig})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Bellatrix{
|
||||
Bellatrix: v1alpha1Blk,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bs *Server) submitCapellaBlock(ctx context.Context, capellaBlk *ethpbv2.BeaconBlockCapella, sig []byte) error {
|
||||
v1alpha1Blk, err := migration.CapellaToV1Alpha1SignedBlock(ðpbv2.SignedBeaconBlockCapella{Message: capellaBlk, Signature: sig})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
|
||||
}
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Capella{
|
||||
Capella: v1alpha1Blk,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bs *Server) submitDenebContents(ctx context.Context, denebContents *ethpbv2.SignedBeaconBlockContentsDeneb) error {
|
||||
blk, err := migration.DenebToV1Alpha1SignedBlock(ðpbv2.SignedBeaconBlockDeneb{
|
||||
Message: denebContents.SignedBlock.Message,
|
||||
Signature: denebContents.SignedBlock.Signature,
|
||||
})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not get block: %v", err)
|
||||
}
|
||||
blobs := migration.SignedBlobsToV1Alpha1SignedBlobs(denebContents.SignedBlobSidecars)
|
||||
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Deneb{
|
||||
Deneb: ð.SignedBeaconBlockAndBlobsDeneb{
|
||||
Block: blk,
|
||||
Blobs: blobs,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
|
||||
return status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,17 +4,11 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -24,11 +18,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
|
||||
@@ -67,562 +59,6 @@ func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (
|
||||
return genBlk, blkContainers
|
||||
}
|
||||
|
||||
func TestServer_GetBlockHeader(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
b.Block.ProposerIndex = 123
|
||||
b.Block.StateRoot = bytesutil.PadTo([]byte("stateroot"), 32)
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
|
||||
t.Run("get header", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
ChainInfoFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
header, err := bs.GetBlockHeader(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedBodyRoot, err := sb.Block().Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
expectedParentRoot := sb.Block().ParentRoot()
|
||||
expectedHeader := ðpbv1.BeaconBlockHeader{
|
||||
Slot: sb.Block().Slot(),
|
||||
ProposerIndex: sb.Block().ProposerIndex(),
|
||||
ParentRoot: expectedParentRoot[:],
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
|
||||
BodyRoot: expectedBodyRoot[:],
|
||||
}
|
||||
expectedHeaderRoot, err := expectedHeader.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headerRoot, err := header.Data.Header.Message.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedHeaderRoot, headerRoot)
|
||||
assert.Equal(t, sb.Block().Slot(), header.Data.Header.Message.Slot)
|
||||
expectedStateRoot := sb.Block().StateRoot()
|
||||
assert.DeepEqual(t, expectedStateRoot[:], header.Data.Header.Message.StateRoot)
|
||||
assert.DeepEqual(t, expectedParentRoot[:], header.Data.Header.Message.ParentRoot)
|
||||
assert.DeepEqual(t, expectedBodyRoot[:], header.Data.Header.Message.BodyRoot)
|
||||
assert.Equal(t, sb.Block().ProposerIndex(), header.Data.Header.Message.ProposerIndex)
|
||||
})
|
||||
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
ChainInfoFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
header, err := bs.GetBlockHeader(ctx, ðpbv1.BlockRequest{BlockId: []byte("head")})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, header.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("true", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: true}}
|
||||
bs := &Server{
|
||||
ChainInfoFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
header, err := bs.GetBlockHeader(ctx, ðpbv1.BlockRequest{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, header.Finalized)
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: false}}
|
||||
bs := &Server{
|
||||
ChainInfoFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
header, err := bs.GetBlockHeader(ctx, ðpbv1.BlockRequest{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, header.Finalized)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_ListBlockHeaders(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 30
|
||||
b1.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b1)
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlock()
|
||||
b3.Block.Slot = 31
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBeaconBlock()
|
||||
b4.Block.Slot = 28
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
t.Run("list headers", func(t *testing.T) {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
mockChainFetcher := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
slot primitives.Slot
|
||||
parentRoot []byte
|
||||
want []*ethpbalpha.SignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
slot: primitives.Slot(30),
|
||||
want: []*ethpbalpha.SignedBeaconBlock{
|
||||
blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
b1,
|
||||
b2,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "parent root",
|
||||
parentRoot: b1.Block.ParentRoot,
|
||||
want: []*ethpbalpha.SignedBeaconBlock{
|
||||
blkContainers[1].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
b1,
|
||||
b3,
|
||||
b4,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
headers, err := bs.ListBlockHeaders(ctx, ðpbv1.BlockHeadersRequest{
|
||||
Slot: &tt.slot,
|
||||
ParentRoot: tt.parentRoot,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(tt.want), len(headers.Data))
|
||||
for i, blk := range tt.want {
|
||||
expectedBodyRoot, err := blk.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
expectedHeader := ðpbv1.BeaconBlockHeader{
|
||||
Slot: blk.Block.Slot,
|
||||
ProposerIndex: blk.Block.ProposerIndex,
|
||||
ParentRoot: blk.Block.ParentRoot,
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: expectedBodyRoot[:],
|
||||
}
|
||||
expectedHeaderRoot, err := expectedHeader.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headerRoot, err := headers.Data[i].Header.Message.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedHeaderRoot, headerRoot)
|
||||
|
||||
assert.Equal(t, blk.Block.Slot, headers.Data[i].Header.Message.Slot)
|
||||
assert.DeepEqual(t, blk.Block.StateRoot, headers.Data[i].Header.Message.StateRoot)
|
||||
assert.DeepEqual(t, blk.Block.ParentRoot, headers.Data[i].Header.Message.ParentRoot)
|
||||
expectedRoot, err := blk.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRoot[:], headers.Data[i].Header.Message.BodyRoot)
|
||||
assert.Equal(t, blk.Block.ProposerIndex, headers.Data[i].Header.Message.ProposerIndex)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
mockChainFetcher := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
Optimistic: true,
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
OptimisticRoots: map[[32]byte]bool{
|
||||
bytesutil.ToBytes32(blkContainers[30].BlockRoot): true,
|
||||
},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
}
|
||||
slot := primitives.Slot(30)
|
||||
headers, err := bs.ListBlockHeaders(ctx, ðpbv1.BlockHeadersRequest{
|
||||
Slot: &slot,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, headers.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
child1 := util.NewBeaconBlock()
|
||||
child1.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
|
||||
child1.Block.Slot = 999
|
||||
util.SaveBlock(t, ctx, beaconDB, child1)
|
||||
child2 := util.NewBeaconBlock()
|
||||
child2.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
|
||||
child2.Block.Slot = 1000
|
||||
util.SaveBlock(t, ctx, beaconDB, child2)
|
||||
child1Root, err := child1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
child2Root, err := child2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockChainFetcher := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{child1Root: true, child2Root: false},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
}
|
||||
|
||||
t.Run("true", func(t *testing.T) {
|
||||
slot := primitives.Slot(999)
|
||||
headers, err := bs.ListBlockHeaders(ctx, ðpbv1.BlockHeadersRequest{
|
||||
Slot: &slot,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, headers.Finalized)
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
slot := primitives.Slot(1000)
|
||||
headers, err := bs.ListBlockHeaders(ctx, ðpbv1.BlockHeadersRequest{
|
||||
Slot: &slot,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, headers.Finalized)
|
||||
})
|
||||
t.Run("false when at least one not finalized", func(t *testing.T) {
|
||||
headers, err := bs.ListBlockHeaders(ctx, ðpbv1.BlockHeadersRequest{
|
||||
ParentRoot: []byte("parent"),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, headers.Finalized)
|
||||
})
|
||||
})
|
||||
}
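
// As the cases above show, headers can be filtered by slot or by parent root, and the
// response-level flags describe the whole result set (Finalized is false if any matching
// block is not finalized). A hedged sketch of the equivalent REST query, assuming the
// standard /eth/v1/beacon/headers route (hypothetical helper; imports: fmt, net/http):
func listHeadersBySlot(baseURL string, slot uint64) (*http.Response, error) {
	// A parent_root filter would be passed instead as "?parent_root=0x...".
	return http.Get(fmt.Sprintf("%s/eth/v1/beacon/headers?slot=%d", baseURL, slot))
}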
|
||||
|
||||
func TestServer_SubmitBlock(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContentsContainer_Phase0Block{Phase0Block: ðpbv1.SignedBeaconBlock{}},
|
||||
}
|
||||
_, err := server.SubmitBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContentsContainer_AltairBlock{AltairBlock: ðpbv2.SignedBeaconBlockAltair{}},
|
||||
}
|
||||
_, err := server.SubmitBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContentsContainer_BellatrixBlock{BellatrixBlock: ðpbv2.SignedBeaconBlockBellatrix{}},
|
||||
}
|
||||
_, err := server.SubmitBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContentsContainer_CapellaBlock{CapellaBlock: ðpbv2.SignedBeaconBlockCapella{}},
|
||||
}
|
||||
_, err := server.SubmitBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
blockReq := ðpbv2.SignedBeaconBlockContentsContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContentsContainer_DenebContents{
|
||||
DenebContents: ðpbv2.SignedBeaconBlockContentsDeneb{
|
||||
SignedBlock: ðpbv2.SignedBeaconBlockDeneb{},
|
||||
SignedBlobSidecars: []*ethpbv2.SignedBlobSidecar{},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := server.SubmitBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mock.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := v1Server.SubmitBlock(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlockAltair()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Bellatrix blinded", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlockCapella()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Capella blinded", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b := util.NewBlindedBeaconBlockCapella()
|
||||
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b, err := util.NewBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
// TODO: update to deneb fork epoch
|
||||
b.SignedBlock.Message.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "deneb")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("Deneb blinded", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
b, err := util.NewBlindedBeaconBlockContentsDeneb(fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, err)
|
||||
// TODO: update to deneb fork epoch
|
||||
b.SignedBlindedBlock.Message.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
ssz, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(api.VersionHeader, "deneb")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
t.Run("sync not ready", func(t *testing.T) {
|
||||
chainService := &mock.ChainService{}
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := v1Server.SubmitBlockSSZ(context.Background(), nil)
|
||||
require.ErrorContains(t, "Syncing to latest head", err)
|
||||
})
|
||||
}
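
// Each SSZ case above sets api.VersionHeader in the request metadata so the server knows
// which fork's SSZ schema to decode, and the blinded Bellatrix/Capella/Deneb payloads are
// expected to be rejected on this endpoint. A hedged sketch of a raw HTTP client doing the
// same, assuming the standard /eth/v1/beacon/blocks route and the Eth-Consensus-Version
// header commonly behind api.VersionHeader (hypothetical helper; imports: bytes, net/http):
func submitBlockSSZ(baseURL, fork string, sszBytes []byte) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPost, baseURL+"/eth/v1/beacon/blocks", bytes.NewReader(sszBytes))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Eth-Consensus-Version", fork) // e.g. "phase0", "capella", "deneb"
	return http.DefaultClient.Do(req)
}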
|
||||
|
||||
func TestServer_GetBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
b := util.NewBeaconBlock()
|
||||
|
||||
@@ -67,19 +67,6 @@ func (_ *Server) GetSpec(ctx context.Context, _ *emptypb.Empty) (*ethpb.SpecResp
|
||||
return ðpb.SpecResponse{Data: data}, nil
|
||||
}
|
||||
|
||||
// GetDepositContract retrieves deposit contract address and genesis fork version.
|
||||
func (_ *Server) GetDepositContract(ctx context.Context, _ *emptypb.Empty) (*ethpb.DepositContractResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.GetDepositContract")
|
||||
defer span.End()
|
||||
|
||||
return ðpb.DepositContractResponse{
|
||||
Data: ðpb.DepositContract{
|
||||
ChainId: params.BeaconConfig().DepositChainID,
|
||||
Address: params.BeaconConfig().DepositContractAddress,
|
||||
},
|
||||
}, nil
|
||||
}
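
// GetDepositContract simply echoes two configuration values, so clients can use it to confirm
// which chain and deposit contract a node is configured for. A hedged sketch of the REST call,
// assuming the standard /eth/v1/config/deposit_contract route (hypothetical helper; imports:
// encoding/json, net/http):
func fetchDepositContract(baseURL string) (chainID, address string, err error) {
	resp, err := http.Get(baseURL + "/eth/v1/config/deposit_contract")
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	var body struct {
		Data struct {
			ChainID string `json:"chain_id"`
			Address string `json:"address"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return "", "", err
	}
	return body.Data.ChainID, body.Data.Address, nil
}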
|
||||
|
||||
func prepareConfigSpec() (map[string]string, error) {
|
||||
data := make(map[string]string)
|
||||
config := *params.BeaconConfig()
|
||||
|
||||
@@ -141,7 +141,7 @@ func TestGetSpec(t *testing.T) {
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 111, len(resp.Data))
|
||||
assert.Equal(t, 112, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -380,6 +380,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "20", v)
|
||||
case "REORG_PARENT_WEIGHT_THRESHOLD":
|
||||
assert.Equal(t, "160", v)
|
||||
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
|
||||
assert.Equal(t, "8", v)
|
||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
@@ -387,22 +389,6 @@ func TestGetSpec(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDepositContract(t *testing.T) {
|
||||
const chainId = 99
|
||||
const address = "0x0000000000000000000000000000000000000009"
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DepositChainID = chainId
|
||||
config.DepositContractAddress = address
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
s := Server{}
|
||||
resp, err := s.GetDepositContract(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(chainId), resp.Data.ChainId)
|
||||
assert.Equal(t, address, resp.Data.Address)
|
||||
}
|
||||
|
||||
func TestForkSchedule_Ok(t *testing.T) {
|
||||
genesisForkVersion := []byte("Genesis")
|
||||
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,25 +1,28 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-playground/validator/v10"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
corehelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -89,13 +92,8 @@ func (s *Server) SubmitAttestations(w http.ResponseWriter, r *http.Request) {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
validate := validator.New()
|
||||
if err := validate.Struct(req); err != nil {
|
||||
http2.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var validAttestations []*ethpbalpha.Attestation
|
||||
var validAttestations []*eth.Attestation
|
||||
var attFailures []*shared.IndexedVerificationFailure
|
||||
for i, sourceAtt := range req.Data {
|
||||
att, err := sourceAtt.ToConsensus()
|
||||
@@ -141,7 +139,6 @@ func (s *Server) SubmitAttestations(w http.ResponseWriter, r *http.Request) {
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)
|
||||
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
|
||||
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
|
||||
log.WithError(err).Errorf("could not broadcast attestation at index %d", i)
|
||||
}
|
||||
|
||||
@@ -209,11 +206,6 @@ func (s *Server) SubmitVoluntaryExit(w http.ResponseWriter, r *http.Request) {
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
validate := validator.New()
|
||||
if err := validate.Struct(req); err != nil {
|
||||
http2.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
exit, err := req.ToConsensus()
|
||||
if err != nil {
|
||||
@@ -238,8 +230,8 @@ func (s *Server) SubmitVoluntaryExit(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
val, err := headState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
if outOfRangeErr, ok := err.(*state_native.ValidatorIndexOutOfRangeError); ok {
|
||||
http2.HandleError(w, "Could not get exiting validator: "+outOfRangeErr.Error(), http.StatusBadRequest)
|
||||
if errors.Is(err, consensus_types.ErrOutOfBounds) {
|
||||
http2.HandleError(w, "Could not get validator: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
http2.HandleError(w, "Could not get validator: "+err.Error(), http.StatusInternalServerError)
|
||||
@@ -277,7 +269,7 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
return
|
||||
}
|
||||
|
||||
var validMessages []*ethpbalpha.SyncCommitteeMessage
|
||||
var validMessages []*eth.SyncCommitteeMessage
|
||||
var msgFailures []*shared.IndexedVerificationFailure
|
||||
for i, sourceMsg := range req.Data {
|
||||
msg, err := sourceMsg.ToConsensus()
|
||||
@@ -292,8 +284,8 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
}
|
||||
|
||||
for _, msg := range validMessages {
|
||||
if err = s.CoreService.SubmitSyncMessage(ctx, msg); err != nil {
|
||||
http2.HandleError(w, "Could not submit message: "+err.Error(), http.StatusInternalServerError)
|
||||
if rpcerr := s.CoreService.SubmitSyncMessage(ctx, msg); rpcerr != nil {
|
||||
http2.HandleError(w, "Could not submit message: "+rpcerr.Err.Error(), core.ErrorReasonToHTTP(rpcerr.Reason))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -307,3 +299,147 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
http2.WriteError(w, failuresErr)
|
||||
}
|
||||
}
|
||||
|
||||
// SubmitBLSToExecutionChanges submits said object to the node's pool
|
||||
// if it passes validation the node must broadcast it to the network.
|
||||
func (s *Server) SubmitBLSToExecutionChanges(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitBLSToExecutionChanges")
|
||||
defer span.End()
|
||||
st, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, fmt.Sprintf("Could not get head state: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var failures []*shared.IndexedVerificationFailure
|
||||
var toBroadcast []*eth.SignedBLSToExecutionChange
|
||||
|
||||
var req []*shared.SignedBLSToExecutionChange
|
||||
err = json.NewDecoder(r.Body).Decode(&req)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
case err != nil:
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if len(req) == 0 {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
for i, change := range req {
|
||||
sbls, err := change.ToConsensus()
|
||||
if err != nil {
|
||||
failures = append(failures, &shared.IndexedVerificationFailure{
|
||||
Index: i,
|
||||
Message: "Unable to decode SignedBLSToExecutionChange: " + err.Error(),
|
||||
})
|
||||
continue
|
||||
}
|
||||
_, err = blocks.ValidateBLSToExecutionChange(st, sbls)
|
||||
if err != nil {
|
||||
failures = append(failures, &shared.IndexedVerificationFailure{
|
||||
Index: i,
|
||||
Message: "Could not validate SignedBLSToExecutionChange: " + err.Error(),
|
||||
})
|
||||
continue
|
||||
}
|
||||
if err := blocks.VerifyBLSChangeSignature(st, sbls); err != nil {
|
||||
failures = append(failures, &shared.IndexedVerificationFailure{
|
||||
Index: i,
|
||||
Message: "Could not validate signature: " + err.Error(),
|
||||
})
|
||||
continue
|
||||
}
|
||||
s.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.BLSToExecutionChangeReceived,
|
||||
Data: &operation.BLSToExecutionChangeReceivedData{
|
||||
Change: sbls,
|
||||
},
|
||||
})
|
||||
s.BLSChangesPool.InsertBLSToExecChange(sbls)
|
||||
if st.Version() >= version.Capella {
|
||||
toBroadcast = append(toBroadcast, sbls)
|
||||
}
|
||||
}
|
||||
go s.broadcastBLSChanges(ctx, toBroadcast)
|
||||
if len(failures) > 0 {
|
||||
failuresErr := &shared.IndexedVerificationFailureError{
|
||||
Code: http.StatusBadRequest,
|
||||
Message: "One or more BLSToExecutionChange failed validation",
|
||||
Failures: failures,
|
||||
}
|
||||
http2.WriteError(w, failuresErr)
|
||||
}
|
||||
}
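
// Clients POST a JSON array of signed changes to /eth/v1/beacon/pool/bls_to_execution_changes
// (the route exercised by the tests for this handler). Valid items are added to the pool and,
// once the head state is Capella or later, broadcast; invalid items are reported back as
// indexed failures with a 400 status. A hedged client-side sketch (hypothetical helper;
// imports: bytes, encoding/json, net/http):
func submitBLSChanges(baseURL string, changes []*shared.SignedBLSToExecutionChange) (*http.Response, error) {
	body, err := json.Marshal(changes)
	if err != nil {
		return nil, err
	}
	return http.Post(baseURL+"/eth/v1/beacon/pool/bls_to_execution_changes", "application/json", bytes.NewReader(body))
}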

// broadcastBLSBatch broadcasts the first `broadcastBLSChangesRateLimit` messages from the slice pointed to by ptr.
// It validates the messages again because they could have been invalidated by being included in blocks since the last validation.
// It removes the messages from the slice and modifies it in place.
func (s *Server) broadcastBLSBatch(ctx context.Context, ptr *[]*eth.SignedBLSToExecutionChange) {
	limit := broadcastBLSChangesRateLimit
	if len(*ptr) < broadcastBLSChangesRateLimit {
		limit = len(*ptr)
	}
	st, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
	if err != nil {
		log.WithError(err).Error("could not get head state")
		return
	}
	for _, ch := range (*ptr)[:limit] {
		if ch != nil {
			_, err := blocks.ValidateBLSToExecutionChange(st, ch)
			if err != nil {
				log.WithError(err).Error("could not validate BLS to execution change")
				continue
			}
			if err := s.Broadcaster.Broadcast(ctx, ch); err != nil {
				log.WithError(err).Error("could not broadcast BLS to execution changes.")
			}
		}
	}
	*ptr = (*ptr)[limit:]
}

func (s *Server) broadcastBLSChanges(ctx context.Context, changes []*eth.SignedBLSToExecutionChange) {
	s.broadcastBLSBatch(ctx, &changes)
	if len(changes) == 0 {
		return
	}

	ticker := time.NewTicker(500 * time.Millisecond)
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			s.broadcastBLSBatch(ctx, &changes)
			if len(changes) == 0 {
				return
			}
		}
	}
}
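
// The two helpers above drain the slice in place: each tick revalidates and broadcasts at most
// broadcastBLSChangesRateLimit items, then re-slices the remainder. A standalone sketch of the
// same drain-in-batches idea with a caller-supplied limit, to make the slicing arithmetic
// explicit (illustrative only, not part of this changeset):
func drainInBatches(items []int, limit int, handle func(int)) {
	for len(items) > 0 {
		n := limit
		if len(items) < limit {
			n = len(items)
		}
		// e.g. 128 items with limit 50 are handled as batches of 50, 50 and 28.
		for _, it := range items[:n] {
			handle(it)
		}
		items = items[n:]
	}
}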

// ListBLSToExecutionChanges retrieves BLS to execution changes known by the node but not necessarily incorporated into any block
func (s *Server) ListBLSToExecutionChanges(w http.ResponseWriter, r *http.Request) {
	_, span := trace.StartSpan(r.Context(), "beacon.ListBLSToExecutionChanges")
	defer span.End()

	sourceChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
	if err != nil {
		http2.HandleError(w, fmt.Sprintf("Could not get BLS to execution changes: %v", err), http.StatusInternalServerError)
		return
	}

	changes, err := shared.SignedBlsToExecutionChangesFromConsensus(sourceChanges)
	if err != nil {
		http2.HandleError(w, "failed to decode SignedBlsToExecutionChanges: "+err.Error(), http.StatusInternalServerError)
		return
	}

	http2.WriteJson(w, &BLSToExecutionChangesPoolResponse{
		Data: changes,
	})
}
|
||||
|
||||
@@ -8,25 +8,37 @@ import (
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
prysmtime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
|
||||
blstoexecmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func TestListAttestations(t *testing.T) {
|
||||
@@ -485,7 +497,7 @@ func TestSubmitVoluntaryExit(t *testing.T) {
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "Could not get exiting validator"))
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "Could not get validator"))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -618,6 +630,373 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestListBLSToExecutionChanges(t *testing.T) {
|
||||
change1 := ðpbv1alpha1.SignedBLSToExecutionChange{
|
||||
Message: ðpbv1alpha1.BLSToExecutionChange{
|
||||
ValidatorIndex: 1,
|
||||
FromBlsPubkey: bytesutil.PadTo([]byte("pubkey1"), 48),
|
||||
ToExecutionAddress: bytesutil.PadTo([]byte("address1"), 20),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature1"), 96),
|
||||
}
|
||||
change2 := ðpbv1alpha1.SignedBLSToExecutionChange{
|
||||
Message: ðpbv1alpha1.BLSToExecutionChange{
|
||||
ValidatorIndex: 2,
|
||||
FromBlsPubkey: bytesutil.PadTo([]byte("pubkey2"), 48),
|
||||
ToExecutionAddress: bytesutil.PadTo([]byte("address2"), 20),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature2"), 96),
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
BLSChangesPool: &blstoexecmock.PoolMock{Changes: []*ethpbv1alpha1.SignedBLSToExecutionChange{change1, change2}},
|
||||
}
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v1/beacon/pool/bls_to_execution_changes", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.ListBLSToExecutionChanges(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
json1, err := shared.SignedBlsToExecutionChangeFromConsensus(change1)
|
||||
require.NoError(t, err)
|
||||
json2, err := shared.SignedBlsToExecutionChangeFromConsensus(change2)
|
||||
require.NoError(t, err)
|
||||
resp := &BLSToExecutionChangesPoolResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.DeepEqual(t, json1, resp.Data[0])
|
||||
assert.DeepEqual(t, json2, resp.Data[1])
|
||||
}
|
||||
|
||||
func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
// Required for correct committee size calculation.
|
||||
c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
spb := ðpbv1alpha1.BeaconStateCapella{
|
||||
Fork: ðpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
numValidators := 10
|
||||
validators := make([]*ethpbv1alpha1.Validator, numValidators)
|
||||
blsChanges := make([]*ethpbv1alpha1.BLSToExecutionChange, numValidators)
|
||||
spb.Balances = make([]uint64, numValidators)
|
||||
privKeys := make([]common.SecretKey, numValidators)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
|
||||
|
||||
for i := range validators {
|
||||
v := ðpbv1alpha1.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = priv
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpbv1alpha1.BLSToExecutionChange{
|
||||
ToExecutionAddress: executionAddress,
|
||||
ValidatorIndex: primitives.ValidatorIndex(i),
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
copy(v.WithdrawalCredentials, digest[:])
|
||||
validators[i] = v
|
||||
blsChanges[i] = message
|
||||
}
|
||||
spb.Validators = validators
|
||||
slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
require.NoError(t, err)
|
||||
spb.Slot = slot
|
||||
st, err := state_native.InitializeFromProtoCapella(spb)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges := make([]*shared.SignedBLSToExecutionChange, numValidators)
|
||||
for i, message := range blsChanges {
|
||||
signature, err := signing.ComputeDomainAndSign(st, prysmtime.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
|
||||
require.NoError(t, err)
|
||||
m, err := shared.BlsToExecutionChangeFromConsensus(message)
|
||||
require.NoError(t, err)
|
||||
signed := &shared.SignedBLSToExecutionChange{
|
||||
Message: m,
|
||||
Signature: hexutil.Encode(signature),
|
||||
}
|
||||
signedChanges[i] = signed
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
Broadcaster: broadcaster,
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
BLSChangesPool: blstoexec.NewPool(),
|
||||
}
|
||||
jsonBytes, err := json.Marshal(signedChanges)
|
||||
require.NoError(t, err)
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example/eth/v1/beacon/pool/bls_to_execution_changes", bytes.NewReader(jsonBytes))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.SubmitBLSToExecutionChanges(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
time.Sleep(100 * time.Millisecond) // Delay to let the routine start
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled)
|
||||
assert.Equal(t, numValidators, len(broadcaster.BroadcastMessages))
|
||||
|
||||
poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
|
||||
require.Equal(t, len(poolChanges), len(signedChanges))
|
||||
require.NoError(t, err)
|
||||
for i, v1alphaChange := range poolChanges {
|
||||
sc, err := signedChanges[i].ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, v1alphaChange, sc)
|
||||
}
|
||||
}
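
// The test derives each validator's withdrawal credentials the way the spec defines BLS
// withdrawal credentials: hash the BLS pubkey and overwrite the first byte with the BLS
// withdrawal prefix, which is what the ValidateBLSToExecutionChange check later compares
// against FromBlsPubkey. A condensed sketch of just that derivation, assuming a plain
// SHA-256 hasher like hash.CustomSHA256Hasher above (imports: crypto/sha256):
func blsWithdrawalCredentials(pubkey []byte) [32]byte {
	creds := sha256.Sum256(pubkey)
	creds[0] = params.BeaconConfig().BLSWithdrawalPrefixByte // 0x00 in the mainnet config
	return creds
}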
|
||||
|
||||
func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
// Required for correct committee size calculation.
|
||||
c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
spb := ðpbv1alpha1.BeaconStateBellatrix{
|
||||
Fork: ðpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().AltairForkVersion,
|
||||
Epoch: params.BeaconConfig().BellatrixForkEpoch,
|
||||
},
|
||||
}
|
||||
numValidators := 10
|
||||
validators := make([]*ethpbv1alpha1.Validator, numValidators)
|
||||
blsChanges := make([]*ethpbv1alpha1.BLSToExecutionChange, numValidators)
|
||||
spb.Balances = make([]uint64, numValidators)
|
||||
privKeys := make([]common.SecretKey, numValidators)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
|
||||
|
||||
for i := range validators {
|
||||
v := ðpbv1alpha1.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = priv
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpbv1alpha1.BLSToExecutionChange{
|
||||
ToExecutionAddress: executionAddress,
|
||||
ValidatorIndex: primitives.ValidatorIndex(i),
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
copy(v.WithdrawalCredentials, digest[:])
|
||||
validators[i] = v
|
||||
blsChanges[i] = message
|
||||
}
|
||||
spb.Validators = validators
|
||||
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
spb.Slot = slot
|
||||
st, err := state_native.InitializeFromProtoBellatrix(spb)
|
||||
require.NoError(t, err)
|
||||
|
||||
spc := ðpbv1alpha1.BeaconStateCapella{
|
||||
Fork: ðpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
slot, err = slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
require.NoError(t, err)
|
||||
spc.Slot = slot
|
||||
|
||||
stc, err := state_native.InitializeFromProtoCapella(spc)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges := make([]*shared.SignedBLSToExecutionChange, numValidators)
|
||||
for i, message := range blsChanges {
|
||||
signature, err := signing.ComputeDomainAndSign(stc, prysmtime.CurrentEpoch(stc), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
bl, err := shared.BlsToExecutionChangeFromConsensus(message)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges[i] = &shared.SignedBLSToExecutionChange{
|
||||
Message: bl,
|
||||
Signature: hexutil.Encode(signature),
|
||||
}
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
Broadcaster: broadcaster,
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
BLSChangesPool: blstoexec.NewPool(),
|
||||
}
|
||||
|
||||
jsonBytes, err := json.Marshal(signedChanges)
|
||||
require.NoError(t, err)
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example/eth/v1/beacon/pool/bls_to_execution_changes", bytes.NewReader(jsonBytes))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitBLSToExecutionChanges(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
// Check that we didn't broadcast the messages but did in fact fill in
|
||||
// the pool
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled)
|
||||
|
||||
poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
|
||||
require.Equal(t, len(poolChanges), len(signedChanges))
|
||||
require.NoError(t, err)
|
||||
for i, v1alphaChange := range poolChanges {
|
||||
sc, err := signedChanges[i].ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, v1alphaChange, sc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
// Required for correct committee size calculation.
|
||||
c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
spb := ðpbv1alpha1.BeaconStateCapella{
|
||||
Fork: ðpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
numValidators := 10
|
||||
validators := make([]*ethpbv1alpha1.Validator, numValidators)
|
||||
blsChanges := make([]*ethpbv1alpha1.BLSToExecutionChange, numValidators)
|
||||
spb.Balances = make([]uint64, numValidators)
|
||||
privKeys := make([]common.SecretKey, numValidators)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
|
||||
|
||||
for i := range validators {
|
||||
v := ðpbv1alpha1.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = priv
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpbv1alpha1.BLSToExecutionChange{
|
||||
ToExecutionAddress: executionAddress,
|
||||
ValidatorIndex: primitives.ValidatorIndex(i),
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
copy(v.WithdrawalCredentials, digest[:])
|
||||
validators[i] = v
|
||||
blsChanges[i] = message
|
||||
}
|
||||
spb.Validators = validators
|
||||
slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
require.NoError(t, err)
|
||||
spb.Slot = slot
|
||||
st, err := state_native.InitializeFromProtoCapella(spb)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges := make([]*shared.SignedBLSToExecutionChange, numValidators)
|
||||
for i, message := range blsChanges {
|
||||
signature, err := signing.ComputeDomainAndSign(st, prysmtime.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
bl, err := shared.BlsToExecutionChangeFromConsensus(message)
|
||||
require.NoError(t, err)
|
||||
if i == 1 {
|
||||
signature[0] = 0x00
|
||||
}
|
||||
signedChanges[i] = &shared.SignedBLSToExecutionChange{
|
||||
Message: bl,
|
||||
Signature: hexutil.Encode(signature),
|
||||
}
|
||||
}
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
Broadcaster: broadcaster,
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
BLSChangesPool: blstoexec.NewPool(),
|
||||
}
|
||||
|
||||
jsonBytes, err := json.Marshal(signedChanges)
|
||||
require.NoError(t, err)
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example/eth/v1/beacon/pool/bls_to_execution_changes", bytes.NewReader(jsonBytes))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitBLSToExecutionChanges(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
time.Sleep(10 * time.Millisecond) // Delay to allow the routine to start
|
||||
require.StringContains(t, "One or more BLSToExecutionChange failed validation", writer.Body.String())
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled)
|
||||
assert.Equal(t, numValidators, len(broadcaster.BroadcastMessages)+1)
|
||||
|
||||
poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
|
||||
require.Equal(t, len(poolChanges)+1, len(signedChanges))
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Change, err := shared.SignedBlsToExecutionChangeFromConsensus(poolChanges[0])
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, v2Change, signedChanges[0])
|
||||
|
||||
for i := 2; i < numValidators; i++ {
|
||||
v2Change, err := shared.SignedBlsToExecutionChangeFromConsensus(poolChanges[i-1])
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, v2Change, signedChanges[i])
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
singleAtt = `[
|
||||
{
|
||||
|
||||
336
beacon-chain/rpc/eth/beacon/handlers_state.go
Normal file
@@ -0,0 +1,336 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
type syncCommitteeStateRequest struct {
|
||||
epoch *primitives.Epoch
|
||||
stateId []byte
|
||||
}
|
||||
|
||||
// GetStateRoot calculates HashTreeRoot for state with given 'stateId'. If stateId is root, same value will be returned.
|
||||
func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetStateRoot")
|
||||
defer span.End()
|
||||
|
||||
stateId := mux.Vars(r)["state_id"]
|
||||
if stateId == "" {
|
||||
http2.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
stateRoot, err := s.Stater.StateRoot(ctx, []byte(stateId))
|
||||
if err != nil {
|
||||
if rootNotFoundErr, ok := err.(*lookup.StateRootNotFoundError); ok {
|
||||
http2.HandleError(w, "State root not found: "+rootNotFoundErr.Error(), http.StatusNotFound)
|
||||
return
|
||||
} else if parseErr, ok := err.(*lookup.StateIdParseError); ok {
|
||||
http2.HandleError(w, "Invalid state ID: "+parseErr.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
http2.HandleError(w, "Could not get state root: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err := s.Stater.State(ctx, []byte(stateId))
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not calculate root of latest block header: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
|
||||
|
||||
resp := &GetStateRootResponse{
|
||||
Data: &StateRoot{
|
||||
Root: hexutil.Encode(stateRoot),
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: isFinalized,
|
||||
}
|
||||
http2.WriteJson(w, resp)
|
||||
}
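
// GetStateRoot maps lookup failures onto HTTP statuses: an unknown state root yields 404, a
// malformed state ID yields 400, and other errors yield 500. A hedged sketch of client-side
// handling, assuming the standard /eth/v1/beacon/states/{state_id}/root route (hypothetical
// helper; imports: net/http):
func stateRootStatus(baseURL, stateID string) (int, error) {
	resp, err := http.Get(baseURL + "/eth/v1/beacon/states/" + stateID + "/root")
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	// 200: root returned; 400: bad state ID; 404: state root not found; 500: internal error.
	return resp.StatusCode, nil
}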
|
||||
|
||||
// GetRandao fetches the RANDAO mix for the requested epoch from the state identified by state_id.
|
||||
// If an epoch is not specified then the RANDAO mix for the state's current epoch will be returned.
|
||||
// By adjusting the state_id parameter you can query for any historic value of the RANDAO mix.
|
||||
// Ordinarily states from the same epoch will mutate the RANDAO mix for that epoch as blocks are applied.
|
||||
func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetRandao")
|
||||
defer span.End()
|
||||
|
||||
stateId := mux.Vars(r)["state_id"]
|
||||
if stateId == "" {
|
||||
http2.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ok, rawEpoch, e := shared.UintFromQuery(w, r, "epoch")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.Stater.State(ctx, []byte(stateId))
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
stEpoch := slots.ToEpoch(st.Slot())
|
||||
epoch := stEpoch
|
||||
if rawEpoch != "" {
|
||||
epoch = primitives.Epoch(e)
|
||||
}
|
||||
|
||||
// future epochs and epochs too far back are not supported.
|
||||
randaoEpochLowerBound := uint64(0)
|
||||
// Lower bound should not underflow.
|
||||
if uint64(stEpoch) > uint64(st.RandaoMixesLength()) {
|
||||
randaoEpochLowerBound = uint64(stEpoch) - uint64(st.RandaoMixesLength())
|
||||
}
|
||||
if epoch > stEpoch || uint64(epoch) < randaoEpochLowerBound+1 {
|
||||
http2.HandleError(w, "Epoch is out of range for the randao mixes of the state", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
idx := epoch % params.BeaconConfig().EpochsPerHistoricalVector
|
||||
randao, err := st.RandaoMixAtIndex(uint64(idx))
|
||||
if err != nil {
|
||||
http2.HandleError(w, fmt.Sprintf("Could not get randao mix at index %d: %v", idx, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not calculate root of latest block header: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
|
||||
|
||||
resp := &GetRandaoResponse{
|
||||
Data: &Randao{Randao: hexutil.Encode(randao)},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: isFinalized,
|
||||
}
|
||||
http2.WriteJson(w, resp)
|
||||
}
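
// The epoch guard above rejects epochs newer than the state's own epoch and epochs old enough
// that their mix has already been overwritten in the circular RANDAO vector, then indexes the
// vector modulo EPOCHS_PER_HISTORICAL_VECTOR. A small sketch of the same arithmetic
// (illustrative only):
func randaoIndex(epoch, stateEpoch, vectorLen uint64) (uint64, bool) {
	lower := uint64(0)
	if stateEpoch > vectorLen {
		lower = stateEpoch - vectorLen // oldest epoch still held by the circular buffer
	}
	if epoch > stateEpoch || epoch < lower+1 {
		return 0, false // future epoch, or a mix that has already been overwritten
	}
	return epoch % vectorLen, true // e.g. epoch 70000 with vectorLen 65536 -> index 4464
}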
|
||||
|
||||
// GetSyncCommittees retrieves the sync committees for the given epoch.
|
||||
// If the epoch is not passed in, then the sync committees for the epoch of the state will be obtained.
|
||||
func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {
|
||||
	ctx, span := trace.StartSpan(r.Context(), "beacon.GetSyncCommittees")
	defer span.End()

	stateId := mux.Vars(r)["state_id"]
	if stateId == "" {
		http2.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
		return
	}
	ok, rawEpoch, e := shared.UintFromQuery(w, r, "epoch")
	if !ok {
		return
	}
	epoch := primitives.Epoch(e)

	currentSlot := s.GenesisTimeFetcher.CurrentSlot()
	currentEpoch := slots.ToEpoch(currentSlot)
	currentPeriodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(currentEpoch)
	if err != nil {
		http2.HandleError(w, fmt.Sprintf("Could not calculate start period for slot %d: %v", currentSlot, err), http.StatusInternalServerError)
		return
	}

	requestNextCommittee := false
	if rawEpoch != "" {
		reqPeriodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(epoch)
		if err != nil {
			http2.HandleError(w, fmt.Sprintf("Could not calculate start period for epoch %d: %v", e, err), http.StatusInternalServerError)
			return
		}
		if reqPeriodStartEpoch > currentPeriodStartEpoch+params.BeaconConfig().EpochsPerSyncCommitteePeriod {
			http2.HandleError(
				w,
				fmt.Sprintf("Could not fetch sync committee too far in the future (requested epoch %d, current epoch %d)", e, currentEpoch),
				http.StatusBadRequest,
			)
			return
		}
		if reqPeriodStartEpoch > currentPeriodStartEpoch {
			requestNextCommittee = true
			epoch = currentPeriodStartEpoch
		}
	}

	syncCommitteeReq := &syncCommitteeStateRequest{
		epoch: nil,
		stateId: []byte(stateId),
	}
	if rawEpoch != "" {
		syncCommitteeReq.epoch = &epoch
	}
	st, ok := s.stateForSyncCommittee(ctx, w, syncCommitteeReq)
	if !ok {
		return
	}

	var committeeIndices []string
	var committee *ethpbalpha.SyncCommittee
	if requestNextCommittee {
		// Get the next sync committee and sync committee indices from the state.
		committeeIndices, committee, err = nextCommitteeIndicesFromState(st)
		if err != nil {
			http2.HandleError(w, "Could not get next sync committee indices: "+err.Error(), http.StatusInternalServerError)
			return
		}
	} else {
		// Get the current sync committee and sync committee indices from the state.
		committeeIndices, committee, err = currentCommitteeIndicesFromState(st)
		if err != nil {
			http2.HandleError(w, "Could not get current sync committee indices: "+err.Error(), http.StatusInternalServerError)
			return
		}
	}
	subcommittees, err := extractSyncSubcommittees(st, committee)
	if err != nil {
		http2.HandleError(w, "Could not extract sync subcommittees: "+err.Error(), http.StatusInternalServerError)
		return
	}

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		return
	}

	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
	if err != nil {
		http2.HandleError(w, "Could not calculate root of latest block header: "+err.Error(), http.StatusInternalServerError)
		return
	}
	isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)

	resp := GetSyncCommitteeResponse{
		Data: &SyncCommitteeValidators{
			Validators: committeeIndices,
			ValidatorAggregates: subcommittees,
		},
		ExecutionOptimistic: isOptimistic,
		Finalized: isFinalized,
	}
	http2.WriteJson(w, resp)
}

func committeeIndicesFromState(st state.BeaconState, committee *ethpbalpha.SyncCommittee) ([]string, *ethpbalpha.SyncCommittee, error) {
	committeeIndices := make([]string, len(committee.Pubkeys))
	for i, key := range committee.Pubkeys {
		index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
		if !ok {
			return nil, nil, fmt.Errorf(
				"validator index not found for pubkey %#x",
				bytesutil.Trunc(key),
			)
		}
		committeeIndices[i] = strconv.FormatUint(uint64(index), 10)
	}
	return committeeIndices, committee, nil
}

func currentCommitteeIndicesFromState(st state.BeaconState) ([]string, *ethpbalpha.SyncCommittee, error) {
	committee, err := st.CurrentSyncCommittee()
	if err != nil {
		return nil, nil, fmt.Errorf(
			"could not get sync committee: %v", err,
		)
	}

	return committeeIndicesFromState(st, committee)
}

func nextCommitteeIndicesFromState(st state.BeaconState) ([]string, *ethpbalpha.SyncCommittee, error) {
	committee, err := st.NextSyncCommittee()
	if err != nil {
		return nil, nil, fmt.Errorf(
			"could not get sync committee: %v", err,
		)
	}

	return committeeIndicesFromState(st, committee)
}

func extractSyncSubcommittees(st state.BeaconState, committee *ethpbalpha.SyncCommittee) ([][]string, error) {
	subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
	subcommittees := make([][]string, subcommitteeCount)
	for i := uint64(0); i < subcommitteeCount; i++ {
		pubkeys, err := altair.SyncSubCommitteePubkeys(committee, primitives.CommitteeIndex(i))
		if err != nil {
			return nil, fmt.Errorf(
				"failed to get subcommittee pubkeys: %v", err,
			)
		}
		subcommittee := make([]string, len(pubkeys))
		for j, key := range pubkeys {
			index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
			if !ok {
				return nil, fmt.Errorf(
					"validator index not found for pubkey %#x",
					bytesutil.Trunc(key),
				)
			}
			subcommittee[j] = strconv.FormatUint(uint64(index), 10)
		}
		subcommittees[i] = subcommittee
	}
	return subcommittees, nil
}

func (s *Server) stateForSyncCommittee(ctx context.Context, w http.ResponseWriter, req *syncCommitteeStateRequest) (state.BeaconState, bool) {
	if req.epoch != nil {
		slot, err := slots.EpochStart(*req.epoch)
		if err != nil {
			http2.HandleError(w, fmt.Sprintf("Could not calculate start slot for epoch %d: %v", *req.epoch, err), http.StatusInternalServerError)
			return nil, false
		}
		st, err := s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
		if err != nil {
			shared.WriteStateFetchError(w, err)
			return nil, false
		}
		return st, true
	}
	st, err := s.Stater.State(ctx, req.stateId)
	if err != nil {
		shared.WriteStateFetchError(w, err)
		return nil, false
	}
	return st, true
}
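The handler above backs GET /eth/v1/beacon/states/{state_id}/sync_committees. For orientation only, here is a minimal client-side sketch of how the endpoint could be consumed; it is not part of this diff. The local response struct, its JSON tags, and the node address http://localhost:3500 are assumptions modeled on the standard beacon API schema.

// Hypothetical client sketch for the sync committees endpoint served by GetSyncCommittees.
// The struct below mirrors GetSyncCommitteeResponse; field tags are assumed, not copied from this diff.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type syncCommitteesResponse struct {
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized           bool `json:"finalized"`
	Data                struct {
		Validators          []string   `json:"validators"`
		ValidatorAggregates [][]string `json:"validator_aggregates"`
	} `json:"data"`
}

func main() {
	// Assumed local beacon node REST address; adjust for your setup.
	res, err := http.Get("http://localhost:3500/eth/v1/beacon/states/head/sync_committees")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	var out syncCommitteesResponse
	if err := json.NewDecoder(res.Body).Decode(&out); err != nil {
		panic(err)
	}
	// Print a short summary: member count, subcommittee count, and the status flags
	// the handler derives from IsOptimistic and IsFinalized.
	fmt.Printf("members=%d subcommittees=%d optimistic=%v finalized=%v\n",
		len(out.Data.Validators), len(out.Data.ValidatorAggregates),
		out.ExecutionOptimistic, out.Finalized)
}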
649
beacon-chain/rpc/eth/beacon/handlers_state_test.go
Normal file
@@ -0,0 +1,649 @@
package beacon

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/gorilla/mux"
	chainMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
	dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	http2 "github.com/prysmaticlabs/prysm/v4/network/http"
	ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestGetStateRoot(t *testing.T) {
	ctx := context.Background()
	fakeState, err := util.NewBeaconState()
	require.NoError(t, err)
	stateRoot, err := fakeState.HashTreeRoot(ctx)
	require.NoError(t, err)
	db := dbTest.SetupDB(t)
	parentRoot := [32]byte{'a'}
	blk := util.NewBeaconBlock()
	blk.Block.ParentRoot = parentRoot[:]
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, ctx, db, blk)
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

	chainService := &chainMock.ChainService{}
	s := &Server{
		Stater: &testutil.MockStater{
			BeaconStateRoot: stateRoot[:],
			BeaconState: fakeState,
		},
		HeadFetcher: chainService,
		OptimisticModeFetcher: chainService,
		FinalizationFetcher: chainService,
		BeaconDB: db,
	}

	request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/root", nil)
	request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
	writer := httptest.NewRecorder()
	writer.Body = &bytes.Buffer{}

	s.GetStateRoot(writer, request)
	require.Equal(t, http.StatusOK, writer.Code)
	resp := &GetStateRootResponse{}
	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
	assert.Equal(t, hexutil.Encode(stateRoot[:]), resp.Data.Root)

	t.Run("execution optimistic", func(t *testing.T) {
		chainService := &chainMock.ChainService{Optimistic: true}
		s := &Server{
			Stater: &testutil.MockStater{
				BeaconStateRoot: stateRoot[:],
				BeaconState: fakeState,
			},
			HeadFetcher: chainService,
			OptimisticModeFetcher: chainService,
			FinalizationFetcher: chainService,
			BeaconDB: db,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/root", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetStateRoot(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetStateRootResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.DeepEqual(t, true, resp.ExecutionOptimistic)
	})

	t.Run("finalized", func(t *testing.T) {
		headerRoot, err := fakeState.LatestBlockHeader().HashTreeRoot()
		require.NoError(t, err)
		chainService := &chainMock.ChainService{
			FinalizedRoots: map[[32]byte]bool{
				headerRoot: true,
			},
		}
		s := &Server{
			Stater: &testutil.MockStater{
				BeaconStateRoot: stateRoot[:],
				BeaconState: fakeState,
			},
			HeadFetcher: chainService,
			OptimisticModeFetcher: chainService,
			FinalizationFetcher: chainService,
			BeaconDB: db,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/root", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetStateRoot(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetStateRootResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.DeepEqual(t, true, resp.Finalized)
	})
}

func TestGetRandao(t *testing.T) {
	mixCurrent := bytesutil.ToBytes32([]byte("current"))
	mixOld := bytesutil.ToBytes32([]byte("old"))
	epochCurrent := primitives.Epoch(100000)
	epochOld := 100000 - params.BeaconConfig().EpochsPerHistoricalVector + 1

	ctx := context.Background()
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	// Set slot to epoch 100000
	require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*100000))
	require.NoError(t, st.UpdateRandaoMixesAtIndex(uint64(epochCurrent%params.BeaconConfig().EpochsPerHistoricalVector), mixCurrent))
	require.NoError(t, st.UpdateRandaoMixesAtIndex(uint64(epochOld%params.BeaconConfig().EpochsPerHistoricalVector), mixOld))

	headEpoch := primitives.Epoch(1)
	headSt, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, headSt.SetSlot(params.BeaconConfig().SlotsPerEpoch))
	headRandao := bytesutil.ToBytes32([]byte("head"))
	require.NoError(t, headSt.UpdateRandaoMixesAtIndex(uint64(headEpoch), headRandao))

	db := dbTest.SetupDB(t)
	chainService := &chainMock.ChainService{}
	s := &Server{
		Stater: &testutil.MockStater{
			BeaconState: st,
		},
		HeadFetcher: chainService,
		OptimisticModeFetcher: chainService,
		FinalizationFetcher: chainService,
		BeaconDB: db,
	}

	t.Run("no epoch requested", func(t *testing.T) {
		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetRandaoResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.Equal(t, hexutil.Encode(mixCurrent[:]), resp.Data.Randao)
	})
	t.Run("current epoch requested", func(t *testing.T) {
		request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=%d", epochCurrent), nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetRandaoResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.Equal(t, hexutil.Encode(mixCurrent[:]), resp.Data.Randao)
	})
	t.Run("old epoch requested", func(t *testing.T) {
		request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=%d", epochOld), nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetRandaoResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.Equal(t, hexutil.Encode(mixOld[:]), resp.Data.Randao)
	})
	t.Run("head state below `EpochsPerHistoricalVector`", func(t *testing.T) {
		s.Stater = &testutil.MockStater{
			BeaconState: headSt,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetRandaoResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.Equal(t, hexutil.Encode(headRandao[:]), resp.Data.Randao)
	})
	t.Run("epoch too old", func(t *testing.T) {
		epochTooOld := primitives.Epoch(100000 - st.RandaoMixesLength())
		request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=%d", epochTooOld), nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusBadRequest, writer.Code)
		e := &http2.DefaultErrorJson{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
		assert.Equal(t, http.StatusBadRequest, e.Code)
		require.StringContains(t, "Epoch is out of range for the randao mixes of the state", e.Message)
	})
	t.Run("epoch in the future", func(t *testing.T) {
		futureEpoch := primitives.Epoch(100000 + 1)
		request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=%d", futureEpoch), nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusBadRequest, writer.Code)
		e := &http2.DefaultErrorJson{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
		assert.Equal(t, http.StatusBadRequest, e.Code)
		require.StringContains(t, "Epoch is out of range for the randao mixes of the state", e.Message)
	})
	t.Run("execution optimistic", func(t *testing.T) {
		parentRoot := [32]byte{'a'}
		blk := util.NewBeaconBlock()
		blk.Block.ParentRoot = parentRoot[:]
		root, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		util.SaveBlock(t, ctx, db, blk)
		require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

		chainService := &chainMock.ChainService{Optimistic: true}
		s := &Server{
			Stater: &testutil.MockStater{
				BeaconState: st,
			},
			HeadFetcher: chainService,
			OptimisticModeFetcher: chainService,
			FinalizationFetcher: chainService,
			BeaconDB: db,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetRandaoResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.DeepEqual(t, true, resp.ExecutionOptimistic)
	})
	t.Run("finalized", func(t *testing.T) {
		parentRoot := [32]byte{'a'}
		blk := util.NewBeaconBlock()
		blk.Block.ParentRoot = parentRoot[:]
		root, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		util.SaveBlock(t, ctx, db, blk)
		require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

		headerRoot, err := headSt.LatestBlockHeader().HashTreeRoot()
		require.NoError(t, err)
		chainService := &chainMock.ChainService{
			FinalizedRoots: map[[32]byte]bool{
				headerRoot: true,
			},
		}
		s := &Server{
			Stater: &testutil.MockStater{
				BeaconState: st,
			},
			HeadFetcher: chainService,
			OptimisticModeFetcher: chainService,
			FinalizationFetcher: chainService,
			BeaconDB: db,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetRandao(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetRandaoResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.DeepEqual(t, true, resp.Finalized)
	})
}

func Test_currentCommitteeIndicesFromState(t *testing.T) {
	st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
	vals := st.Validators()
	wantedCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
	wantedIndices := make([]string, len(wantedCommittee))
	for i := 0; i < len(wantedCommittee); i++ {
		wantedIndices[i] = strconv.FormatUint(uint64(i), 10)
		wantedCommittee[i] = vals[i].PublicKey
	}
	require.NoError(t, st.SetCurrentSyncCommittee(&ethpbalpha.SyncCommittee{
		Pubkeys: wantedCommittee,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}))

	t.Run("OK", func(t *testing.T) {
		indices, committee, err := currentCommitteeIndicesFromState(st)
		require.NoError(t, err)
		require.DeepEqual(t, wantedIndices, indices)
		require.DeepEqual(t, wantedCommittee, committee.Pubkeys)
	})
	t.Run("validator in committee not found in state", func(t *testing.T) {
		wantedCommittee[0] = bytesutil.PadTo([]byte("fakepubkey"), 48)
		require.NoError(t, st.SetCurrentSyncCommittee(&ethpbalpha.SyncCommittee{
			Pubkeys: wantedCommittee,
			AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
		}))
		_, _, err := currentCommitteeIndicesFromState(st)
		require.ErrorContains(t, "index not found for pubkey", err)
	})
}

func Test_nextCommitteeIndicesFromState(t *testing.T) {
	st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
	vals := st.Validators()
	wantedCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
	wantedIndices := make([]string, len(wantedCommittee))
	for i := 0; i < len(wantedCommittee); i++ {
		wantedIndices[i] = strconv.FormatUint(uint64(i), 10)
		wantedCommittee[i] = vals[i].PublicKey
	}
	require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
		Pubkeys: wantedCommittee,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}))

	t.Run("OK", func(t *testing.T) {
		indices, committee, err := nextCommitteeIndicesFromState(st)
		require.NoError(t, err)
		require.DeepEqual(t, wantedIndices, indices)
		require.DeepEqual(t, wantedCommittee, committee.Pubkeys)
	})
	t.Run("validator in committee not found in state", func(t *testing.T) {
		wantedCommittee[0] = bytesutil.PadTo([]byte("fakepubkey"), 48)
		require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
			Pubkeys: wantedCommittee,
			AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
		}))
		_, _, err := nextCommitteeIndicesFromState(st)
		require.ErrorContains(t, "index not found for pubkey", err)
	})
}

func Test_extractSyncSubcommittees(t *testing.T) {
	st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
	vals := st.Validators()
	syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
	for i := 0; i < len(syncCommittee); i++ {
		syncCommittee[i] = vals[i].PublicKey
	}
	require.NoError(t, st.SetCurrentSyncCommittee(&ethpbalpha.SyncCommittee{
		Pubkeys: syncCommittee,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}))

	commSize := params.BeaconConfig().SyncCommitteeSize
	subCommSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
	wantedSubcommitteeValidators := make([][]string, 0)

	for i := uint64(0); i < commSize; i += subCommSize {
		sub := make([]string, 0)
		start := i
		end := i + subCommSize
		if end > commSize {
			end = commSize
		}
		for j := start; j < end; j++ {
			sub = append(sub, strconv.FormatUint(j, 10))
		}
		wantedSubcommitteeValidators = append(wantedSubcommitteeValidators, sub)
	}

	t.Run("OK", func(t *testing.T) {
		committee, err := st.CurrentSyncCommittee()
		require.NoError(t, err)
		subcommittee, err := extractSyncSubcommittees(st, committee)
		require.NoError(t, err)
		for i, got := range subcommittee {
			want := wantedSubcommitteeValidators[i]
			require.DeepEqual(t, want, got)
		}
	})
	t.Run("validator in subcommittee not found in state", func(t *testing.T) {
		syncCommittee[0] = bytesutil.PadTo([]byte("fakepubkey"), 48)
		require.NoError(t, st.SetCurrentSyncCommittee(&ethpbalpha.SyncCommittee{
			Pubkeys: syncCommittee,
			AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
		}))
		committee, err := st.CurrentSyncCommittee()
		require.NoError(t, err)
		_, err = extractSyncSubcommittees(st, committee)
		require.ErrorContains(t, "index not found for pubkey", err)
	})
}

func TestGetSyncCommittees(t *testing.T) {
	ctx := context.Background()
	st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
	vals := st.Validators()
	for i := 0; i < len(syncCommittee); i++ {
		syncCommittee[i] = vals[i].PublicKey
	}
	require.NoError(t, st.SetCurrentSyncCommittee(&ethpbalpha.SyncCommittee{
		Pubkeys: syncCommittee,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}))
	stRoot, err := st.HashTreeRoot(ctx)
	require.NoError(t, err)
	db := dbTest.SetupDB(t)

	stSlot := st.Slot()
	chainService := &chainMock.ChainService{Slot: &stSlot}
	s := &Server{
		GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
			Genesis: time.Now(),
		},
		Stater: &testutil.MockStater{
			BeaconState: st,
		},
		HeadFetcher: chainService,
		OptimisticModeFetcher: chainService,
		FinalizationFetcher: chainService,
		BeaconDB: db,
		ChainInfoFetcher: chainService,
	}

	request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/sync_committees", nil)
	request = mux.SetURLVars(request, map[string]string{"state_id": hexutil.Encode(stRoot[:])})
	writer := httptest.NewRecorder()
	writer.Body = &bytes.Buffer{}

	s.GetSyncCommittees(writer, request)
	require.Equal(t, http.StatusOK, writer.Code)
	resp := &GetSyncCommitteeResponse{}
	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
	committeeVals := resp.Data.Validators
	require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)))
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
		assert.Equal(t, strconv.FormatUint(i, 10), committeeVals[i])
	}
	require.Equal(t, params.BeaconConfig().SyncCommitteeSubnetCount, uint64(len(resp.Data.ValidatorAggregates)))
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
		vStartIndex := primitives.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount * i)
		vEndIndex := primitives.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount*(i+1) - 1)
		j := 0
		for vIndex := vStartIndex; vIndex <= vEndIndex; vIndex++ {
			assert.Equal(t, strconv.FormatUint(uint64(vIndex), 10), resp.Data.ValidatorAggregates[i][j])
			j++
		}
	}

	t.Run("execution optimistic", func(t *testing.T) {
		parentRoot := [32]byte{'a'}
		blk := util.NewBeaconBlock()
		blk.Block.ParentRoot = parentRoot[:]
		root, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		util.SaveBlock(t, ctx, db, blk)
		require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

		stSlot := st.Slot()
		chainService := &chainMock.ChainService{Optimistic: true, Slot: &stSlot}
		s := &Server{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			Stater: &testutil.MockStater{
				BeaconState: st,
			},
			HeadFetcher: chainService,
			OptimisticModeFetcher: chainService,
			FinalizationFetcher: chainService,
			BeaconDB: db,
			ChainInfoFetcher: chainService,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/sync_committees", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": hexutil.Encode(stRoot[:])})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetSyncCommittees(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetSyncCommitteeResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.Equal(t, true, resp.ExecutionOptimistic)
	})

	t.Run("finalized", func(t *testing.T) {
		parentRoot := [32]byte{'a'}
		blk := util.NewBeaconBlock()
		blk.Block.ParentRoot = parentRoot[:]
		root, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		util.SaveBlock(t, ctx, db, blk)
		require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

		headerRoot, err := st.LatestBlockHeader().HashTreeRoot()
		require.NoError(t, err)
		stSlot := st.Slot()
		chainService := &chainMock.ChainService{
			FinalizedRoots: map[[32]byte]bool{
				headerRoot: true,
			},
			Slot: &stSlot,
		}
		s := &Server{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			Stater: &testutil.MockStater{
				BeaconState: st,
			},
			HeadFetcher: chainService,
			OptimisticModeFetcher: chainService,
			FinalizationFetcher: chainService,
			BeaconDB: db,
			ChainInfoFetcher: chainService,
		}

		request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/sync_committees", nil)
		request = mux.SetURLVars(request, map[string]string{"state_id": hexutil.Encode(stRoot[:])})
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		s.GetSyncCommittees(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &GetSyncCommitteeResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		assert.Equal(t, true, resp.Finalized)
	})
}

type futureSyncMockFetcher struct {
	BeaconState state.BeaconState
	BeaconStateRoot []byte
}

func (m *futureSyncMockFetcher) State(_ context.Context, stateId []byte) (state.BeaconState, error) {
	expectedRequest := []byte(strconv.FormatUint(uint64(0), 10))
	res := bytes.Compare(stateId, expectedRequest)
	if res != 0 {
		return nil, fmt.Errorf(
			"requested wrong epoch for next sync committee (expected %#x, received %#x)",
			expectedRequest,
			stateId,
		)
	}
	return m.BeaconState, nil
}
func (m *futureSyncMockFetcher) StateRoot(context.Context, []byte) ([]byte, error) {
	return m.BeaconStateRoot, nil
}

func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (state.BeaconState, error) {
	return m.BeaconState, nil
}

func TestGetSyncCommittees_Future(t *testing.T) {
	st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
	vals := st.Validators()
	for i := 0; i < len(syncCommittee); i++ {
		syncCommittee[i] = vals[i].PublicKey
	}
	require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
		Pubkeys: syncCommittee,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}))
	db := dbTest.SetupDB(t)

	chainService := &chainMock.ChainService{}
	s := &Server{
		GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
			Genesis: time.Now(),
		},
		Stater: &futureSyncMockFetcher{
			BeaconState: st,
		},
		HeadFetcher: chainService,
		OptimisticModeFetcher: chainService,
		FinalizationFetcher: chainService,
		BeaconDB: db,
	}

	epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod
	request := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com//eth/v1/beacon/states/{state_id}/sync_committees?epoch=%d", epoch), nil)
	request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
	writer := httptest.NewRecorder()
	writer.Body = &bytes.Buffer{}
	s.GetSyncCommittees(writer, request)
	require.Equal(t, http.StatusBadRequest, writer.Code)
	e := &http2.DefaultErrorJson{}
	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
	assert.Equal(t, http.StatusBadRequest, e.Code)
	assert.StringContains(t, "Could not fetch sync committee too far in the future", e.Message)

	epoch = 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
	request = httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com//eth/v1/beacon/states/{state_id}/sync_committees?epoch=%d", epoch), nil)
	request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
	writer = httptest.NewRecorder()
	writer.Body = &bytes.Buffer{}
	s.GetSyncCommittees(writer, request)
	require.Equal(t, http.StatusOK, writer.Code)
	resp := &GetSyncCommitteeResponse{}
	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
	committeeVals := resp.Data.Validators
	require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)))
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
		assert.Equal(t, strconv.FormatUint(i, 10), committeeVals[i])
	}
	require.Equal(t, params.BeaconConfig().SyncCommitteeSubnetCount, uint64(len(resp.Data.ValidatorAggregates)))
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
		vStartIndex := primitives.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount * i)
		vEndIndex := primitives.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount*(i+1) - 1)
		j := 0
		for vIndex := vStartIndex; vIndex <= vEndIndex; vIndex++ {
			assert.Equal(t, strconv.FormatUint(uint64(vIndex), 10), resp.Data.ValidatorAggregates[i][j])
			j++
		}
	}
}
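As context for TestGetSyncCommittees_Future above: the handler maps the requested epoch to the start epoch of its sync committee period, rejects requests more than one full period ahead of the current period, and serves the state's next sync committee when the request falls in the immediately following period. The standalone sketch below illustrates only that arithmetic; the hard-coded period length of 256 epochs and the helper names are assumptions for illustration, whereas the handler itself relies on slots.SyncCommitteePeriodStartEpoch and params.BeaconConfig().EpochsPerSyncCommitteePeriod.

// Illustrative sketch of the epoch-to-period validation used by GetSyncCommittees.
package main

import "fmt"

// Assumed mainnet value of EPOCHS_PER_SYNC_COMMITTEE_PERIOD.
const epochsPerSyncCommitteePeriod = 256

// periodStartEpoch returns the first epoch of the sync committee period containing epoch.
func periodStartEpoch(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod * epochsPerSyncCommitteePeriod
}

// classify mirrors the branching in the handler: too far in the future is an error,
// the next period is served from the next committee, anything else from the current one.
func classify(requestedEpoch, currentEpoch uint64) (string, error) {
	reqStart := periodStartEpoch(requestedEpoch)
	curStart := periodStartEpoch(currentEpoch)
	switch {
	case reqStart > curStart+epochsPerSyncCommitteePeriod:
		return "", fmt.Errorf("epoch %d is too far in the future", requestedEpoch)
	case reqStart > curStart:
		return "next committee", nil
	default:
		return "current committee", nil
	}
}

func main() {
	// With the current epoch at 0, epoch 511 (= 2*256-1) is still servable from the
	// next committee, while epoch 512 (= 2*256) is rejected, matching the test above.
	for _, e := range []uint64{0, 255, 256, 511, 512} {
		kind, err := classify(e, 0)
		if err != nil {
			fmt.Println(e, "->", err)
			continue
		}
		fmt.Println(e, "->", kind)
	}
}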
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff