Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: backfill-r ... blob-proce

178 Commits
| SHA1 |
|---|
| edbf5d11d1 |
| 1148d96313 |
| daf5e9b286 |
| 309a863358 |
| 15b88ed7f2 |
| 4b4855d65c |
| 03564ee93b |
| 96541c2c6c |
| 0bc55339a7 |
| 2618852090 |
| fb3e42c1b3 |
| 0e2a9d0d82 |
| 6573cccf1d |
| a89727f38c |
| 5e1fb15c40 |
| 5de8ec4600 |
| 7e88eefc60 |
| cabf3476e7 |
| 5a01eecc50 |
| b608c9f711 |
| 671bf00c98 |
| cbf6a2752d |
| 642458f037 |
| 2a067d5d03 |
| a2f60364ae |
| 45f68fa8d5 |
| f55708b995 |
| 00826e8858 |
| 76fec1799e |
| 9c938d354d |
| 83932d8e05 |
| beebb56c8e |
| 0920fb1f61 |
| 29f8880638 |
| f91efafe24 |
| 9387a36b66 |
| 65ce27292c |
| 823f8ee3a2 |
| 88e1b9edb3 |
| c7e28908f5 |
| 7143fe80bc |
| e231d88ca0 |
| 0486b64dcc |
| b4847ac9ad |
| bc125a95ae |
| f592bf7f07 |
| bcc23d2ded |
| 6e0715e92a |
| 71fa70ce40 |
| 4809da62cc |
| e10dbaa8b4 |
| 71b08a50b7 |
| cb5ce74a23 |
| cc81444e13 |
| bfae7f3c9f |
| 2fc5011091 |
| 493a7179d7 |
| b52baba2f1 |
| 2f378a045a |
| 58cdb29ef3 |
| cc2b4db582 |
| be9b6ea837 |
| 806a394c89 |
| 97a99874e8 |
| 945b087ca9 |
| b57effd096 |
| 867db1aeee |
| 99843688cd |
| a536612c39 |
| c5501f8775 |
| 55e4c6e1db |
| 2806326155 |
| d7318ea485 |
| e183d1dff4 |
| a3868e7fc6 |
| af70677778 |
| 8eb82dd378 |
| 0bd232667b |
| 39072e1b74 |
| 66011d5d9c |
| 419dbd57f7 |
| da6ae3c204 |
| 44973b0bb3 |
| de0c7e6256 |
| c1c0cd040c |
| 734eb98941 |
| ffaef83634 |
| f9a40ef111 |
| 7454041356 |
| f37301c0c0 |
| cf1bfb9d67 |
| 9d8dd5c9ad |
| f812bdcf60 |
| 58c0899676 |
| 4628c19f51 |
| 58f23d2302 |
| 9e33723b26 |
| 2c32a87d17 |
| 56f3dafb54 |
| 70380660b3 |
| ecd55e5462 |
| 46b2442127 |
| d8b2d9060f |
| b667c68c87 |
| fb19ee8895 |
| 51b8075474 |
| 367504f403 |
| b4e72f1e19 |
| a7361cd5ab |
| 02ee6897bb |
| c20f188966 |
| 5870536dca |
| c8b39e08ef |
| b0b4e42436 |
| b0caea3fae |
| a46370f5bf |
| 0919b2245f |
| 42c192d97d |
| 3394bbe359 |
| 70225186ff |
| 942d63fcc1 |
| 90fb2325db |
| 57a63f37f7 |
| 9a2c2470c6 |
| 723f73795f |
| 6454081577 |
| 489b34a01f |
| e22258caa9 |
| 3bc9ac37f6 |
| 7247b8bd3c |
| e2591f7c5b |
| b40729f6c0 |
| b7cb5c81be |
| e76aedf1ae |
| 0e5d299d02 |
| 14f040de48 |
| 1a1a30591e |
| 3070878d59 |
| f59307358e |
| ef1f5e6dbe |
| a22ca3fecb |
| 14ce051668 |
| 998a493ee2 |
| 398f44bb53 |
| 4098b3a1d2 |
| d8e6d2cb2e |
| 6b915bab26 |
| dd73f762ec |
| 4d120b53ae |
| 4d6b3252ae |
| 9bb81537c8 |
| 0fdf63b565 |
| bd85b0e4e1 |
| d1562bab53 |
| 1e29877406 |
| 8c39c55f05 |
| 0ccfc74e86 |
| 9ecf4d34f5 |
| a8793c9f21 |
| 79445d2bf6 |
| 35fc1c976f |
| b0423a94af |
| 6c16e90fe9 |
| c0a01bb859 |
| 2a408a0dd8 |
| af16c71d6e |
| ec954ec9a6 |
| 7781a3186b |
| 440cf32966 |
| 13c69af717 |
| ca88e59ee3 |
| 809a67ebcc |
| f17898f658 |
| d506f9b2da |
| d55757500f |
| 41dff74e90 |
| 4f48c551da |
| 2f6dcb34b6 |
2  .bazelrc
@@ -15,7 +15,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false --define blst_modern=true
build --define blst_disabled=false
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true

@@ -12,7 +12,8 @@
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
59  .github/ISSUE_TEMPLATE/bug_report.md (vendored)
@@ -1,59 +0,0 @@
---
name: "\U0001F41EBug report"
about: Report a bug or problem with running Prysm
---

<!--💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎

Hellooo! 😄

To help us tend to your issue faster, please search our currently open issues before submitting a new one.
Existing issues often contain information about workarounds, resolution, or progress updates.

💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎-->

# 🐞 Bug Report

### Description

<!-- ✍️--> A clear and concise description of the problem...

### Has this worked before in a previous version?

<!-- Did this behavior use to work in the previous version? -->
<!-- ✍️--> Yes, the previous version in which this bug was not present was: ....

## 🔬 Minimal Reproduction

<!--
Please let us know how we can reproduce this issue. Include the exact method you used to run Prysm along with any flags used in your beacon chain and/or validator. Make sure you don't upload any confidential files or private keys.
-->

## 🔥 Error

<pre><code>
<!-- If the issue is accompanied by an error, please share the error logs with us below. If you have a lot of logs, place make a paste bin with your logs and share the link with us here: -->
<!-- ✍️-->

</code></pre>


## 🌍 Your Environment

**Operating System:**

<pre>
<code>

</code>
</pre>

**What version of Prysm are you running? (Which release)**

<pre>
<code>

</code>
</pre>

**Anything else relevant (validator index / public key)?**
66  .github/ISSUE_TEMPLATE/bug_report.yml (vendored, new file)
@@ -0,0 +1,66 @@
name: 🐞 Bug report
description: Report a bug or problem with running Prysm
labels: ["Bug"]
body:
  - type: markdown
    attributes:
      value: |
        To help us tend to your issue faster, please search our currently open issues before submitting a new one.
        Existing issues often contain information about workarounds, resolution, or progress updates.
  - type: textarea
    id: what-happened
    attributes:
      label: Describe the bug
      description: |
        A clear and concise description of the problem...
    validations:
      required: true
  - type: textarea
    id: previous-version
    attributes:
      label: Has this worked before in a previous version?
      description: Did this behavior use to work in the previous version?
      render: text
  - type: textarea
    id: reproduction-steps
    attributes:
      label: 🔬 Minimal Reproduction
      description: |
        Please let us know how we can reproduce this issue.
        Include the exact method you used to run Prysm along with any flags used in your beacon chain and/or validator.
        Make sure you don't upload any confidential files or private keys.
      placeholder: |
        Steps to reproduce:

        1. Start '...'
        2. Then '...'
        3. Check '...'
        4. See error
  - type: textarea
    id: errors
    attributes:
      label: Error
      description: |
        If the issue is accompanied by an error, please share the error logs with us below.
        If you have a lot of logs, place make a paste bin with your logs and share the link with us here:
      render: text
  - type: dropdown
    id: platform
    attributes:
      label: Platform(s)
      description: What platform(s) did this occur on?
      multiple: true
      options:
        - Linux (x86)
        - Linux (ARM)
        - Mac (Intel)
        - Mac (Apple Silicon)
        - Windows (x86)
        - Windows (ARM)
  - type: input
    attributes:
      label: What version of Prysm are you running? (Which release)
      description: You can check your Prysm version by running your beacon node or validator with the `--version` flag.
  - type: textarea
    attributes:
      label: Anything else relevant (validator index / public key)?
8  .github/workflows/go.yml (vendored)
@@ -75,14 +75,14 @@ jobs:
- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
env:
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
env:
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"

# Tests run via Bazel for now...
# - name: Test
95  WORKSPACE
@@ -50,8 +50,6 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool

configure_prysm_toolchains()

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
name = "bazel_skylib",
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",

@@ -67,10 +65,10 @@ bazel_skylib_workspace()

http_archive(
name = "bazel_gazelle",
sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
],
)

@@ -87,6 +85,24 @@ http_archive(
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
)

http_archive(
name = "rules_oci",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
)

load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")

rules_oci_dependencies()

load("@rules_oci//oci:repositories.bzl", "LATEST_CRANE_VERSION", "oci_register_toolchains")

oci_register_toolchains(
name = "oci",
crane_version = LATEST_CRANE_VERSION,
)

http_archive(
name = "io_bazel_rules_go",
patch_args = ["-p1"],

@@ -94,10 +110,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
sha256 = "91585017debb61982f7054c9688857a2ad1fd823fc3f9cb05048b0025c47d023",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
],
)

@@ -167,12 +183,30 @@ container_pull(
repository = "pinglamb/alpine-glibc",
)

load("@rules_oci//oci:pull.bzl", "oci_pull")

# A multi-arch base image
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:9b8e0854865dcaf49470b4ec305df45957020fbcf17b71eeb50ffd3bc5bf885d", # 2023-05-17
image = "gcr.io/distroless/cc-debian11",
platforms = [
"linux/amd64",
"linux/arm64",
],
reproducible = True,
)

load("@prysm//tools:image_deps.bzl", "prysm_image_deps")

prysm_image_deps()

load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")

go_rules_dependencies()

go_register_toolchains(
go_version = "1.20.7",
go_version = "1.20.10",
nogo = "@//:nogo",
)

@@ -213,7 +247,9 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.4.0-beta.1"
consensus_spec_test_version = "v1.4.0-beta.2-hotfix"

consensus_spec_version = "v1.4.0-beta.2"

bls_test_version = "v0.1.1"

@@ -229,8 +265,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "24399b60ce3fbeb2311952d213dc3731b6dcb0f8881b016c283de5b518d2bbba",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
sha256 = "99770a001189f66204a4ef79161c8002bcbbcbd8236f1c6479bd5b83a3c68d42",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
)

http_archive(

@@ -245,8 +281,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "8e656ee48d2e2ebc9cf9baedb81f27925bc625b3e3fbb2883444a08758a5884a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
sha256 = "56763f6492ee137108271007d62feef60d8e3f1698e53dee4bc4b07e55f7326b",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
)

http_archive(

@@ -261,8 +297,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "8bd137da6cc57a25383bfac5bc37e31265098145278bd8002b88e24c8b4718b9",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
sha256 = "bc1cac1a991cdc7426efea14385dcf215df85ed3f0572b824ad6a1d7ca0c89ad",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
)

http_archive(

@@ -276,7 +312,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2bc1edb6e4a4f86c00317c04618a90b0ca29ee1eba833d0a64dd67fdd83fdbe3",
sha256 = "c5898001aaab2a5bb38a39ff9d17a52f1f9befcc26e63752cbf556040f0c884e",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -323,9 +359,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4116c8acb54eb3ca28cc4dc9bc688e08e25da91d70ed1f2622f02d3c33eba922",
strip_prefix = "holesky-76057d57ab1f585519ecb606a9e5f7780e925a37",
url = "https://github.com/eth-clients/holesky/archive/76057d57ab1f585519ecb606a9e5f7780e925a37.tar.gz", # Aug 27, 2023
sha256 = "9f66d8d5644982d3d0d2e3d2b9ebe77a5f96638a5d7fcd715599c32818195cb3",
strip_prefix = "holesky-ea39b9006210848e13f28d92e12a30548cecd41d",
url = "https://github.com/eth-clients/holesky/archive/ea39b9006210848e13f28d92e12a30548cecd41d.tar.gz", # 2023-09-21
)

http_archive(

@@ -337,10 +373,23 @@ http_archive(
],
)

# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""

# External dependencies
http_archive(
name = "googleapis",
sha256 = "9d1a930e767c93c825398b8f8692eca3fe353b9aaadedfbcf1fca2282c85df88",
strip_prefix = "googleapis-64926d52febbf298cb82a8f472ade4a3969ba922",
urls = [
"https://github.com/googleapis/googleapis/archive/64926d52febbf298cb82a8f472ade4a3969ba922.zip",
],
)

load("@googleapis//:repository_rules.bzl", "switched_rules_by_language")

switched_rules_by_language(
name = "com_google_googleapis_imports",
go = True,
)

load("//:deps.bzl", "prysm_deps")

# gazelle:repository_macro deps.bzl%prysm_deps
@@ -13,6 +13,8 @@ go_library(
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

@@ -14,6 +14,8 @@ import (
"text/template"

"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"

@@ -148,14 +150,14 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
fr := &forkResponse{}
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
fr := &shared.Fork{}
dataWrapper := &struct{ Data *shared.Fork }{Data: fr}
err = json.Unmarshal(body, dataWrapper)
if err != nil {
return nil, errors.Wrap(err, "error decoding json response in GetFork")
}

return fr.Fork()
return fr.ToConsensus()
}

// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
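Note: the GetFork change above replaces the hand-rolled forkResponse type with the shared shared.Fork representation and its ToConsensus() converter. A minimal sketch of the resulting decode path, assuming only what the hunk shows (a JSON body wrapped in a data field and a ToConsensus() that yields the consensus Fork); the helper name and the ethpb alias path are assumptions, not Prysm API:

```go
package beaconclientsketch

import (
	"encoding/json"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	// ethpb alias path assumed; it is the v1alpha1 package used for *ethpb.Fork elsewhere in this diff.
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// decodeFork mirrors the pattern GetFork uses after this change: unwrap the
// {"data": ...} envelope into shared.Fork, then convert to the consensus type.
func decodeFork(body []byte) (*ethpb.Fork, error) {
	fr := &shared.Fork{}
	wrapper := &struct{ Data *shared.Fork }{Data: fr}
	if err := json.Unmarshal(body, wrapper); err != nil {
		return nil, fmt.Errorf("decoding fork response: %w", err)
	}
	return fr.ToConsensus()
}
```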
@@ -283,7 +285,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData

// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {

@@ -322,12 +324,12 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim

// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
body, err := c.Get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err

@@ -335,40 +337,8 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
return poolResponse, nil
}

type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
Epoch string `json:"epoch"`
}

func (f *forkResponse) Fork() (*ethpb.Fork, error) {
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
if err != nil {
return nil, err
}
cSlice, err := hexutil.Decode(f.CurrentVersion)
if err != nil {
return nil, err
}
if len(cSlice) != 4 {
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
}
pSlice, err := hexutil.Decode(f.PreviousVersion)
if err != nil {
return nil, err
}
if len(pSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
}
return &ethpb.Fork{
CurrentVersion: cSlice,
PreviousVersion: pSlice,
Epoch: primitives.Epoch(epoch),
}, nil
}

type forkScheduleResponse struct {
Data []forkResponse
Data []shared.Fork
}

func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
@@ -11,6 +11,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -41,6 +42,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -52,7 +54,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -13,9 +13,11 @@ import (
"text/template"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"

@@ -268,9 +270,13 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
tracing.AnnotateError(span, err)
return err
}
vs := make([]*SignedValidatorRegistration, len(svr))
vs := make([]*shared.SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
svrJson, err := shared.SignedValidatorRegistrationFromConsensus(svr[i])
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to encode to SignedValidatorRegistration at index %d", i))
}
vs[i] = svrJson
}
body, err := json.Marshal(vs)
if err != nil {
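Note: after this hunk, RegisterValidator converts each protobuf SignedValidatorRegistrationV1 into the shared JSON type before marshaling. A hedged sketch of that conversion loop in isolation; the helper name is ours, while the shared.* call is the one introduced above:

```go
package buildersketch

import (
	"encoding/json"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	// ethpb alias path assumed; it is the v1alpha1 package used in this file's signatures.
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// marshalRegistrations converts consensus-layer registrations to their
// JSON-friendly shared representation and marshals the slice, mirroring the
// request body built in RegisterValidator above.
func marshalRegistrations(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
	vs := make([]*shared.SignedValidatorRegistration, len(svr))
	for i := range svr {
		v, err := shared.SignedValidatorRegistrationFromConsensus(svr[i])
		if err != nil {
			return nil, errors.Wrapf(err, "failed to encode registration at index %d", i)
		}
		vs[i] = v
	}
	return json.Marshal(vs)
}
```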
@@ -295,12 +301,14 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
b, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(&ethpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}

versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}

@@ -330,12 +338,14 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
b, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(&ethpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}

versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}

@@ -365,8 +375,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}

b := &ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{Block: psb, Blobs: blobs}
b, err := shared.SignedBlindedBeaconBlockContentsDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{SignedBlindedBlock: psb, SignedBlindedBlobSidecars: blobs})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockContentsDeneb to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
@@ -14,6 +14,7 @@ import (

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"

@@ -23,6 +24,7 @@ import (
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
log "github.com/sirupsen/logrus"
)

type roundtrip func(*http.Request) (*http.Response, error)

@@ -283,6 +285,42 @@ func TestClient_GetHeader(t *testing.T) {
require.Equal(t, len(bundle.Proofs[i]) == 48, true)
}
})
t.Run("deneb, no bundle", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDenebNoBundle)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
bundle, err := bid.BlindedBlobsBundle()
require.NoError(t, err)
require.Equal(t, (*v1.BlindedBlobsBundle)(nil), bundle)
})

t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -361,10 +399,18 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("deneb", func(t *testing.T) {

test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
var req shared.SignedBlindedBeaconBlockContentsDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
block, err := req.SignedBlindedBlock.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, block, test.SignedBlindedBlock)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),

@@ -376,11 +422,11 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
sbb, err := blocks.NewSignedBeaconBlock(test.Block)

sbb, err := blocks.NewSignedBeaconBlock(test.SignedBlindedBlock)
require.NoError(t, err)

ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.Blobs)
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.SignedBlindedBlobSidecars)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
@@ -708,9 +754,13 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
}

func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
basebytes, err := shared.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
if err != nil {
log.Error(err)
}
return &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
Block: &eth.SignedBlindedBeaconBlockDeneb{
Block: &eth.BlindedBeaconBlockDeneb{
SignedBlindedBlock: &eth.SignedBlindedBeaconBlockDeneb{
Message: &eth.BlindedBeaconBlockDeneb{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
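Note: the fixture now builds the base fee with shared.Uint256ToSSZBytes instead of a decimal-string byte slice. A small sketch of that helper in use, assuming only the signature visible in this hunk (decimal string in, byte slice and error out); the wrapper function is illustrative:

```go
package buildersketch

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
)

// baseFeeBytes converts a decimal base-fee string into the SSZ byte encoding
// used for BaseFeePerGas in the payload header fixtures above.
func baseFeeBytes(dec string) ([]byte, error) {
	b, err := shared.Uint256ToSSZBytes(dec)
	if err != nil {
		return nil, fmt.Errorf("invalid uint256 %q: %w", dec, err)
	}
	return b, nil
}
```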
@@ -722,7 +772,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
Graffiti: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{

@@ -825,8 +875,8 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
SyncCommitteeSignature: make([]byte, 96),
SyncCommitteeBits: ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

@@ -840,7 +890,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BaseFeePerGas: basebytes,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

@@ -851,7 +901,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Blobs: []*eth.SignedBlindedBlobSidecar{
SignedBlindedBlobSidecars: []*eth.SignedBlindedBlobSidecar{
{
Message: &eth.BlindedBlobSidecar{
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

File diff suppressed because one or more lines are too long
@@ -16,87 +16,6 @@ import (
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// SignedValidatorRegistration a struct for signed validator registrations.
type SignedValidatorRegistration struct {
*eth.SignedValidatorRegistrationV1
}

// ValidatorRegistration a struct for validator registrations.
type ValidatorRegistration struct {
*eth.ValidatorRegistrationV1
}

// MarshalJSON returns a json representation copy of signed validator registration.
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &ValidatorRegistration{r.Message},
Signature: r.SignedValidatorRegistrationV1.Signature,
})
}

// UnmarshalJSON returns a byte representation of signed validator registration from json.
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}
r.Message = o.Message.ValidatorRegistrationV1
r.Signature = o.Signature
return nil
}

// MarshalJSON returns a json representation copy of validator registration.
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
}{
FeeRecipient: r.FeeRecipient,
GasLimit: fmt.Sprintf("%d", r.GasLimit),
Timestamp: fmt.Sprintf("%d", r.Timestamp),
Pubkey: r.Pubkey,
})
}

// UnmarshalJSON returns a byte representation of validator registration from json.
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}

r.FeeRecipient = o.FeeRecipient
r.Pubkey = o.Pubkey
var err error
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse gas limit")
}
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse timestamp")
}

return nil
}

var errInvalidUint256 = errors.New("invalid Uint256")
var errDecodeUint256 = errors.New("unable to decode into Uint256")
@@ -636,44 +555,6 @@ type SignedBlindedBeaconBlockBellatrix struct {
*eth.SignedBlindedBeaconBlockBellatrix
}

// BlindedBeaconBlockBellatrix is a field in SignedBlindedBeaconBlockBellatrix.
type BlindedBeaconBlockBellatrix struct {
*eth.BlindedBeaconBlockBellatrix
}

// BlindedBeaconBlockBodyBellatrix is a field in BlindedBeaconBlockBellatrix.
type BlindedBeaconBlockBodyBellatrix struct {
*eth.BlindedBeaconBlockBodyBellatrix
}

// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockBellatrix.
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
})
}

// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBellatrix.
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: b.ParentRoot,
StateRoot: b.StateRoot,
Body: &BlindedBeaconBlockBodyBellatrix{b.BlindedBeaconBlockBellatrix.Body},
})
}

// ProposerSlashing is a field in BlindedBeaconBlockBodyCapella.
type ProposerSlashing struct {
*eth.ProposerSlashing
@@ -928,53 +809,6 @@ func (e *Eth1Data) MarshalJSON() ([]byte, error) {
})
}

// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyBellatrix.
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits[i]}
}
deps := make([]*Deposit, len(b.BlindedBeaconBlockBodyBellatrix.Deposits))
for i := range b.BlindedBeaconBlockBodyBellatrix.Deposits {
deps[i] = &Deposit{Deposit: b.BlindedBeaconBlockBodyBellatrix.Deposits[i]}
}
atts := make([]*Attestation, len(b.BlindedBeaconBlockBodyBellatrix.Attestations))
for i := range b.BlindedBeaconBlockBodyBellatrix.Attestations {
atts[i] = &Attestation{Attestation: b.BlindedBeaconBlockBodyBellatrix.Attestations[i]}
}
atsl := make([]*AttesterSlashing, len(b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings))
for i := range b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings {
atsl[i] = &AttesterSlashing{AttesterSlashing: b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings[i]}
}
pros := make([]*ProposerSlashing, len(b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings))
for i := range b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings {
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
Graffiti: b.BlindedBeaconBlockBodyBellatrix.Graffiti,
ProposerSlashings: pros,
AttesterSlashings: atsl,
Attestations: atts,
Deposits: deps,
VoluntaryExits: sve,
SyncAggregate: &SyncAggregate{b.BlindedBeaconBlockBodyBellatrix.SyncAggregate},
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
})
}

// SignedBLSToExecutionChange is a field in Beacon Block Body for capella and above.
type SignedBLSToExecutionChange struct {
*eth.SignedBLSToExecutionChange
@@ -1009,102 +843,6 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
})
}

// SignedBlindedBeaconBlockCapella is part of the request object sent to builder API /eth/v1/builder/blinded_blocks for Capella.
type SignedBlindedBeaconBlockCapella struct {
*eth.SignedBlindedBeaconBlockCapella
}

// BlindedBeaconBlockCapella is a field in SignedBlindedBeaconBlockCapella.
type BlindedBeaconBlockCapella struct {
*eth.BlindedBeaconBlockCapella
}

// BlindedBeaconBlockBodyCapella is a field in BlindedBeaconBlockCapella.
type BlindedBeaconBlockBodyCapella struct {
*eth.BlindedBeaconBlockBodyCapella
}

// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockCapella.
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockCapella `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &BlindedBeaconBlockCapella{b.Block},
Signature: b.Signature,
})
}

// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockCapella
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyCapella `json:"body"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: b.ParentRoot,
StateRoot: b.StateRoot,
Body: &BlindedBeaconBlockBodyCapella{b.Body},
})
}

// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyCapella
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
for i := range b.VoluntaryExits {
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.VoluntaryExits[i]}
}
deps := make([]*Deposit, len(b.Deposits))
for i := range b.Deposits {
deps[i] = &Deposit{Deposit: b.Deposits[i]}
}
atts := make([]*Attestation, len(b.Attestations))
for i := range b.Attestations {
atts[i] = &Attestation{Attestation: b.Attestations[i]}
}
atsl := make([]*AttesterSlashing, len(b.AttesterSlashings))
for i := range b.AttesterSlashings {
atsl[i] = &AttesterSlashing{AttesterSlashing: b.AttesterSlashings[i]}
}
pros := make([]*ProposerSlashing, len(b.ProposerSlashings))
for i := range b.ProposerSlashings {
pros[i] = &ProposerSlashing{ProposerSlashing: b.ProposerSlashings[i]}
}
chs := make([]*SignedBLSToExecutionChange, len(b.BlsToExecutionChanges))
for i := range b.BlsToExecutionChanges {
chs[i] = &SignedBLSToExecutionChange{SignedBLSToExecutionChange: b.BlsToExecutionChanges[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderCapella `json:"execution_payload_header"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.Eth1Data},
Graffiti: b.Graffiti,
ProposerSlashings: pros,
AttesterSlashings: atsl,
Attestations: atts,
Deposits: deps,
VoluntaryExits: sve,
BLSToExecutionChanges: chs,
SyncAggregate: &SyncAggregate{b.SyncAggregate},
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: b.ExecutionPayloadHeader},
})
}

// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseDeneb struct {
Data struct {
@@ -1131,9 +869,12 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
if err != nil {
return nil, err
}
bundle, err := bb.BlindedBlobsBundle.ToProto()
if err != nil {
return nil, err
var bundle *v1.BlindedBlobsBundle
if bb.BlindedBlobsBundle != nil {
bundle, err = bb.BlindedBlobsBundle.ToProto()
if err != nil {
return nil, err
}
}
return &eth.BuilderBidDeneb{
Header: header,
@@ -12,8 +12,8 @@ import (
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/math"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"

@@ -38,7 +38,8 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
},
Signature: make([]byte, 96),
}
a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
a, err := shared.SignedValidatorRegistrationFromConsensus(svr)
require.NoError(t, err)
je, err := json.Marshal(a)
require.NoError(t, err)
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields

@@ -55,11 +56,11 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)

t.Run("roundtrip", func(t *testing.T) {
b := &SignedValidatorRegistration{}
b := &shared.SignedValidatorRegistration{}
if err := json.Unmarshal(je, b); err != nil {
require.NoError(t, err)
}
require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
require.DeepEqual(t, a, b)
})
}
@@ -159,6 +160,36 @@ var testExampleHeaderResponseDeneb = `{
}
}`

var testExampleHeaderResponseDenebNoBundle = `{
"version": "deneb",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"blob_gas_used": "1",
"excess_blob_gas": "2"
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`

var testExampleHeaderResponseUnknownVersion = `{
"version": "bad",
"data": {
@@ -1499,7 +1530,7 @@ func TestUint256UnmarshalTooBig(t *testing.T) {
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
expected, err := os.ReadFile("testdata/blinded-block.json")
require.NoError(t, err)
b := &BlindedBeaconBlockBellatrix{BlindedBeaconBlockBellatrix: &eth.BlindedBeaconBlockBellatrix{
b, err := shared.BlindedBeaconBlockBellatrixFromConsensus(&eth.BlindedBeaconBlockBellatrix{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

@@ -1516,7 +1547,8 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
SyncAggregate: pbSyncAggregate(),
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
},
}}
})
require.NoError(t, err)
m, err := json.Marshal(b)
require.NoError(t, err)
// string error output is easier to deal with

@@ -1528,7 +1560,7 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
expected, err := os.ReadFile("testdata/blinded-block-capella.json")
require.NoError(t, err)
b := &BlindedBeaconBlockCapella{BlindedBeaconBlockCapella: &eth.BlindedBeaconBlockCapella{
b, err := shared.BlindedBeaconBlockCapellaFromConsensus(&eth.BlindedBeaconBlockCapella{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

@@ -1545,7 +1577,8 @@ func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
SyncAggregate: pbSyncAggregate(),
ExecutionPayloadHeader: pbExecutionPayloadHeaderCapella(t),
},
}}
})
require.NoError(t, err)
m, err := json.Marshal(b)
require.NoError(t, err)
// string error output is easier to deal with
@@ -7,6 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
@@ -8,6 +8,7 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)

@@ -41,7 +42,7 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Data) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}

@@ -50,8 +51,8 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
}
for index := range jsonremote.Keystores {
hexKeys[jsonremote.Keystores[index].Pubkey] = true
for index := range jsonremote.Data {
hexKeys[jsonremote.Data[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {

@@ -74,14 +75,14 @@ func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.List
}

// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*rpc.ListRemoteKeysResponse, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
jsonremote := &rpc.ListRemoteKeysResponse{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")

@@ -107,13 +108,13 @@ func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []stri
}

// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*rpc.GetFeeRecipientByPubkeyResponse, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
feejson := &rpc.GetFeeRecipientByPubkeyResponse{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
@@ -4,6 +4,7 @@ const (
|
||||
VersionHeader = "Eth-Consensus-Version"
|
||||
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
|
||||
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
|
||||
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
|
||||
JsonMediaType = "application/json"
|
||||
OctetStreamMediaType = "application/octet-stream"
|
||||
)
|
||||
|
||||
@@ -15,6 +15,9 @@ func StartAndEndPage(pageToken string, pageSize, totalSize int) (int, int, strin
|
||||
if pageToken == "" {
|
||||
pageToken = "0"
|
||||
}
|
||||
if pageSize < 0 || totalSize < 0 {
|
||||
return 0, 0, "", errors.Errorf("invalid page and total sizes provided: page size %d , total size %d", pageSize, totalSize)
|
||||
}
|
||||
if pageSize == 0 {
|
||||
pageSize = params.BeaconConfig().DefaultPageSize
|
||||
}
|
||||
@@ -23,6 +26,9 @@ func StartAndEndPage(pageToken string, pageSize, totalSize int) (int, int, strin
|
||||
if err != nil {
|
||||
return 0, 0, "", errors.Wrap(err, "could not convert page token")
|
||||
}
|
||||
if token < 0 {
|
||||
return 0, 0, "", errors.Errorf("invalid token value provided: %d", token)
|
||||
}
|
||||
|
||||
// Start page can not be greater than set size.
|
||||
start := token * pageSize
|
||||
|
||||
@@ -85,3 +85,19 @@ func TestStartAndEndPage_ExceedsMaxPage(t *testing.T) {
|
||||
_, _, _, err := pagination.StartAndEndPage("", 0, 0)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
func TestStartAndEndPage_InvalidPageValues(t *testing.T) {
|
||||
_, _, _, err := pagination.StartAndEndPage("10", -1, 10)
|
||||
assert.ErrorContains(t, "invalid page and total sizes provided", err)
|
||||
|
||||
_, _, _, err = pagination.StartAndEndPage("12", 10, -10)
|
||||
assert.ErrorContains(t, "invalid page and total sizes provided", err)
|
||||
|
||||
_, _, _, err = pagination.StartAndEndPage("12", -50, -60)
|
||||
assert.ErrorContains(t, "invalid page and total sizes provided", err)
|
||||
}
|
||||
|
||||
func TestStartAndEndPage_InvalidTokenValue(t *testing.T) {
|
||||
_, _, _, err := pagination.StartAndEndPage("-12", 50, 60)
|
||||
assert.ErrorContains(t, "invalid token value provided", err)
|
||||
}
|
||||
|
||||
24
api/server/BUILD.bazel
Normal file
24
api/server/BUILD.bazel
Normal file
@@ -0,0 +1,24 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/api/server",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"middleware_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
15
api/server/middleware.go
Normal file
15
api/server/middleware.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
NormalizeQueryValues(query)
|
||||
r.URL.RawQuery = query.Encode()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
54
api/server/middleware_test.go
Normal file
54
api/server/middleware_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := NormalizeQueryValuesHandler(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputQuery string
|
||||
expectedQuery string
|
||||
}{
|
||||
{
|
||||
name: "3 values",
|
||||
inputQuery: "key=value1,value2,value3",
|
||||
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
if req.URL.RawQuery != test.expectedQuery {
|
||||
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
|
||||
}
|
||||
|
||||
if rr.Body.String() != "next handler" {
|
||||
t.Errorf("next handler was not executed")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package helpers
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
@@ -1,4 +1,4 @@
|
||||
package helpers
|
||||
package server
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@@ -5,12 +5,14 @@ go_library(
|
||||
srcs = [
|
||||
"chain_info.go",
|
||||
"chain_info_forkchoice.go",
|
||||
"currently_syncing_block.go",
|
||||
"error.go",
|
||||
"execution_engine.go",
|
||||
"forkchoice_update_execution.go",
|
||||
"head.go",
|
||||
"head_sync_committee_info.go",
|
||||
"init_sync_process_block.go",
|
||||
"lightclient.go",
|
||||
"log.go",
|
||||
"merge_ascii_art.go",
|
||||
"metrics.go",
|
||||
@@ -38,7 +40,6 @@ go_library(
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
@@ -48,6 +49,7 @@ go_library(
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
@@ -77,6 +79,8 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
|
||||
@@ -525,3 +525,8 @@ func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (
|
||||
}
|
||||
return nil, errBlockDoesNotExist
|
||||
}
|
||||
|
||||
// BlockBeingSynced returns whether the block with the given root is currently being synced
|
||||
func (s *Service) BlockBeingSynced(root [32]byte) bool {
|
||||
return s.blockBeingSynced.isSyncing(root)
|
||||
}
|
||||
|
||||
27
beacon-chain/blockchain/currently_syncing_block.go
Normal file
27
beacon-chain/blockchain/currently_syncing_block.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package blockchain
|
||||
|
||||
import "sync"
|
||||
|
||||
type currentlySyncingBlock struct {
|
||||
sync.Mutex
|
||||
roots map[[32]byte]struct{}
|
||||
}
|
||||
|
||||
func (b *currentlySyncingBlock) set(root [32]byte) {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
b.roots[root] = struct{}{}
|
||||
}
|
||||
|
||||
func (b *currentlySyncingBlock) unset(root [32]byte) {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
delete(b.roots, root)
|
||||
}
|
||||
|
||||
func (b *currentlySyncingBlock) isSyncing(root [32]byte) bool {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
_, ok := b.roots[root]
|
||||
return ok
|
||||
}
|
||||
@@ -70,11 +70,8 @@ func IsInvalidBlock(e error) bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return IsInvalidBlock(errors.Unwrap(e))
|
||||
}
|
||||
return true
|
||||
var d invalidBlockError
|
||||
return errors.As(e, &d)
|
||||
}
|
||||
|
||||
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
|
||||
@@ -83,7 +80,8 @@ func InvalidBlockLVH(e error) [32]byte {
|
||||
if e == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
d, ok := e.(invalidBlockError)
|
||||
var d invalidBlockError
|
||||
ok := errors.As(e, &d)
|
||||
if !ok {
|
||||
return [32]byte{}
|
||||
}
|
||||
@@ -96,7 +94,8 @@ func InvalidBlockRoot(e error) [32]byte {
|
||||
if e == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
d, ok := e.(invalidBlockError)
|
||||
var d invalidBlockError
|
||||
ok := errors.As(e, &d)
|
||||
if !ok {
|
||||
return [32]byte{}
|
||||
}
|
||||
@@ -108,7 +107,8 @@ func InvalidAncestorRoots(e error) [][32]byte {
|
||||
if e == nil {
|
||||
return [][32]byte{}
|
||||
}
|
||||
d, ok := e.(invalidBlockError)
|
||||
var d invalidBlockError
|
||||
ok := errors.As(e, &d)
|
||||
if !ok {
|
||||
return [][32]byte{}
|
||||
}
|
||||
|
||||
@@ -24,6 +24,9 @@ func TestInvalidBlockRoot(t *testing.T) {
|
||||
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
|
||||
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
|
||||
|
||||
newErr := errors.Wrap(err, "wrap me")
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
|
||||
}
|
||||
|
||||
func TestInvalidRoots(t *testing.T) {
|
||||
@@ -33,4 +36,9 @@ func TestInvalidRoots(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
|
||||
require.DeepEqual(t, roots, InvalidAncestorRoots(err))
|
||||
|
||||
newErr := errors.Wrap(err, "wrap me")
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
|
||||
require.DeepEqual(t, roots, InvalidAncestorRoots(newErr))
|
||||
}
|
||||
|
||||
@@ -390,7 +390,7 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
|
||||
return err
|
||||
}
|
||||
// No op if the sidecar does not exist.
|
||||
if err := s.cfg.BeaconDB.DeleteBlobSidecar(ctx, root); err != nil {
|
||||
if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -405,8 +405,13 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([
|
||||
|
||||
versionedHashes := make([]common.Hash, len(commitments))
|
||||
for i, commitment := range commitments {
|
||||
versionedHashes[i] = sha256.Sum256(commitment)
|
||||
versionedHashes[i][0] = blobCommitmentVersionKZG
|
||||
versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
|
||||
}
|
||||
return versionedHashes, nil
|
||||
}
|
||||
|
||||
func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
|
||||
versionedHash := sha256.Sum256(commitment)
|
||||
versionedHash[0] = blobCommitmentVersionKZG
|
||||
return versionedHash
|
||||
}
|
||||
|
||||
@@ -2,8 +2,10 @@ package kzg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -31,6 +33,61 @@ func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error
|
||||
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
|
||||
}
|
||||
|
||||
var ErrKzgProofFailed = errors.New("failed to prove commitment to BlobSidecar Blob data")
|
||||
|
||||
type KzgProofError struct {
|
||||
failed [][48]byte
|
||||
}
|
||||
|
||||
func NewKzgProofError(failed [][48]byte) *KzgProofError {
|
||||
return &KzgProofError{failed: failed}
|
||||
}
|
||||
|
||||
func (e *KzgProofError) Error() string {
|
||||
cmts := make([]string, len(e.failed))
|
||||
for i := range e.failed {
|
||||
cmts[i] = fmt.Sprintf("%#x", e.failed[i])
|
||||
}
|
||||
return fmt.Sprintf("%s: bad commitments=%s", ErrKzgProofFailed.Error(), strings.Join(cmts, ","))
|
||||
}
|
||||
|
||||
func (e *KzgProofError) Failed() [][48]byte {
|
||||
return e.failed
|
||||
}
|
||||
|
||||
func (e *KzgProofError) Unwrap() error {
|
||||
return ErrKzgProofFailed
|
||||
}
|
||||
|
||||
// BisectBlobSidecarKzgProofs tries to batch prove the given sidecars against their own specified commitment.
|
||||
// The caller is responsible for ensuring that the commitments match those specified by the block.
|
||||
// If the batch fails, it will then try to verify the proofs one-by-one.
|
||||
// If an error is returned, it will be a custom error of type KzgProofError that provides access
|
||||
// to the list of commitments that failed.
|
||||
func BisectBlobSidecarKzgProofs(sidecars []*ethpb.BlobSidecar) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
blobs := make([]GoKZG.Blob, len(sidecars))
|
||||
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
|
||||
proofs := make([]GoKZG.KZGProof, len(sidecars))
|
||||
for i, sidecar := range sidecars {
|
||||
blobs[i] = bytesToBlob(sidecar.Blob)
|
||||
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
|
||||
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
|
||||
}
|
||||
if err := kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs); err == nil {
|
||||
return nil
|
||||
}
|
||||
failed := make([][48]byte, 0, len(blobs))
|
||||
for i := range blobs {
|
||||
if err := kzgContext.VerifyBlobKZGProof(blobs[i], cmts[i], proofs[i]); err != nil {
|
||||
failed = append(failed, cmts[i])
|
||||
}
|
||||
}
|
||||
return NewKzgProofError(failed)
|
||||
}
|
||||
|
||||
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
|
||||
copy(ret[:], blob)
|
||||
return
|
||||
|
||||
245
beacon-chain/blockchain/lightclient.go
Normal file
245
beacon-chain/blockchain/lightclient.go
Normal file
@@ -0,0 +1,245 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
const (
|
||||
finalityBranchNumOfLeaves = 6
|
||||
)
|
||||
|
||||
// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
|
||||
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
|
||||
//
|
||||
// return LightClientFinalityUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// finalized_header=update.finalized_header,
|
||||
// finality_branch=update.finality_branch,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func CreateLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
|
||||
return ðpbv2.LightClientFinalityUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
|
||||
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
|
||||
//
|
||||
// return LightClientOptimisticUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func CreateLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
|
||||
return ðpbv2.LightClientOptimisticUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
|
||||
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
|
||||
attestedEpoch := slots.ToEpoch(attestedState.Slot())
|
||||
if attestedEpoch < params.BeaconConfig().AltairForkEpoch {
|
||||
return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
|
||||
}
|
||||
|
||||
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
syncAggregate, err := block.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get sync aggregate %v", err)
|
||||
}
|
||||
|
||||
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
|
||||
}
|
||||
|
||||
// assert state.slot == state.latest_block_header.slot
|
||||
if state.Slot() != state.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), state.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// assert hash_tree_root(header) == hash_tree_root(block.message)
|
||||
header := state.LatestBlockHeader()
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get state root %v", err)
|
||||
}
|
||||
header.StateRoot = stateRoot[:]
|
||||
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get header root %v", err)
|
||||
}
|
||||
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block root %v", err)
|
||||
}
|
||||
|
||||
if headerRoot != blockRoot {
|
||||
return nil, fmt.Errorf("header root %#x not equal to block root %#x", headerRoot, blockRoot)
|
||||
}
|
||||
|
||||
// assert attested_state.slot == attested_state.latest_block_header.slot
|
||||
if attestedState.Slot() != attestedState.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("attested state slot %d not equal to attested latest block header slot %d", attestedState.Slot(), attestedState.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// attested_header = attested_state.latest_block_header.copy()
|
||||
attestedHeader := attestedState.LatestBlockHeader()
|
||||
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested state root %v", err)
|
||||
}
|
||||
attestedHeader.StateRoot = attestedStateRoot[:]
|
||||
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested header root %v", err)
|
||||
}
|
||||
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
|
||||
}
|
||||
|
||||
// Return result
|
||||
attestedHeaderResult := ðpbv1.BeaconBlockHeader{
|
||||
Slot: attestedHeader.Slot,
|
||||
ProposerIndex: attestedHeader.ProposerIndex,
|
||||
ParentRoot: attestedHeader.ParentRoot,
|
||||
StateRoot: attestedHeader.StateRoot,
|
||||
BodyRoot: attestedHeader.BodyRoot,
|
||||
}
|
||||
|
||||
syncAggregateResult := ðpbv1.SyncAggregate{
|
||||
SyncCommitteeBits: syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: syncAggregate.SyncCommitteeSignature,
|
||||
}
|
||||
|
||||
result := ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: attestedHeaderResult,
|
||||
SyncAggregate: syncAggregateResult,
|
||||
SignatureSlot: block.Block().Slot(),
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientFinalityUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
|
||||
result, err := NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx,
|
||||
state,
|
||||
block,
|
||||
attestedState,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Indicate finality whenever possible
|
||||
var finalizedHeader *ethpbv1.BeaconBlockHeader
|
||||
var finalityBranch [][]byte
|
||||
|
||||
if finalizedBlock != nil && !finalizedBlock.IsNil() {
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
tempFinalizedHeader, err := finalizedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header %v", err)
|
||||
}
|
||||
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
|
||||
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header root %v", err)
|
||||
}
|
||||
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
return nil, fmt.Errorf("finalized header root %#x not equal to attested finalized checkpoint root %#x", finalizedHeaderRoot, bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root))
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
|
||||
return nil, fmt.Errorf("invalid finalized header root %v", attestedState.FinalizedCheckpoint().Root)
|
||||
}
|
||||
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
var bErr error
|
||||
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
|
||||
if bErr != nil {
|
||||
return nil, fmt.Errorf("could not get finalized root proof %v", bErr)
|
||||
}
|
||||
} else {
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
|
||||
finalityBranch = make([][]byte, finalityBranchNumOfLeaves)
|
||||
for i := 0; i < finalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch[i] = make([]byte, 32)
|
||||
}
|
||||
}
|
||||
|
||||
result.FinalizedHeader = finalizedHeader
|
||||
result.FinalityBranch = finalityBranch
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromFinalityUpdate(update *ethpbv2.LightClientFinalityUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromOptimisticUpdate(update *ethpbv2.LightClientOptimisticUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
160
beacon-chain/blockchain/lightclient_test.go
Normal file
160
beacon-chain/blockchain/lightclient_test.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
type testlc struct {
|
||||
t *testing.T
|
||||
ctx context.Context
|
||||
state state.BeaconState
|
||||
block interfaces.ReadOnlySignedBeaconBlock
|
||||
attestedState state.BeaconState
|
||||
attestedHeader *ethpb.BeaconBlockHeader
|
||||
}
|
||||
|
||||
func newTestLc(t *testing.T) *testlc {
|
||||
return &testlc{t: t}
|
||||
}
|
||||
|
||||
func (l *testlc) setupTest() *testlc {
|
||||
ctx := context.Background()
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateCapella()
|
||||
require.NoError(l.t, err)
|
||||
err = attestedState.SetSlot(slot)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parent := util.NewBeaconBlockCapella()
|
||||
parent.Block.Slot = slot
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(l.t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(l.t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
state, err := util.NewBeaconStateCapella()
|
||||
require.NoError(l.t, err)
|
||||
err = state.SetSlot(slot)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(l.t, err)
|
||||
|
||||
block := util.NewBeaconBlockCapella()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(l.t, err)
|
||||
|
||||
err = state.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(l.t, err)
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
l.state = state
|
||||
l.attestedState = attestedState
|
||||
l.attestedHeader = attestedHeader
|
||||
l.block = signedBlock
|
||||
l.ctx = ctx
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
|
||||
require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
|
||||
require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
|
||||
require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
|
||||
require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")
|
||||
|
||||
attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
|
||||
require.NoError(l.t, err)
|
||||
require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
|
||||
}
|
||||
|
||||
func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
|
||||
syncAggregate, err := l.block.Block().Body().SyncAggregate()
|
||||
require.NoError(l.t, err)
|
||||
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
|
||||
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
|
||||
}
|
||||
|
||||
func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
|
||||
l := newTestLc(t).setupTest()
|
||||
|
||||
update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
|
||||
|
||||
l.checkSyncAggregate(update)
|
||||
l.checkAttestedHeader(update)
|
||||
|
||||
require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
|
||||
require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
|
||||
}
|
||||
|
||||
func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
|
||||
l := newTestLc(t).setupTest()
|
||||
|
||||
update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
|
||||
|
||||
l.checkSyncAggregate(update)
|
||||
l.checkAttestedHeader(update)
|
||||
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
|
||||
require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
|
||||
require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
|
||||
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
|
||||
require.Equal(t, finalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
|
||||
for _, leaf := range update.FinalityBranch {
|
||||
require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
|
||||
}
|
||||
}
|
||||
@@ -66,7 +66,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("blobCommitmentCount", len(kzgs))
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
@@ -147,3 +147,15 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlobSidecar(scs []*ethpb.BlobSidecar, startTime time.Time) {
|
||||
if len(scs) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"count": len(scs),
|
||||
"slot": scs[0].Slot,
|
||||
"block": hex.EncodeToString(scs[0].BlockRoot),
|
||||
"validationTime": time.Since(startTime),
|
||||
}).Debug("Synced new blob sidecars")
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
|
||||
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
|
||||
@@ -3,7 +3,6 @@ package blockchain
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
@@ -62,7 +61,7 @@ func WithExecutionEngineCaller(c execution.EngineCaller) Option {
|
||||
}
|
||||
|
||||
// WithDepositCache for deposit lifecycle after chain inclusion.
|
||||
func WithDepositCache(c *depositcache.DepositCache) Option {
|
||||
func WithDepositCache(c cache.DepositCache) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.DepositCache = c
|
||||
return nil
|
||||
|
||||
@@ -13,10 +13,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -162,7 +163,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
return preStateVersion, preStateHeader, nil
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error {
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -265,6 +266,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrap(err, "could not validate blob data availability")
|
||||
}
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
@@ -503,14 +507,17 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
t := time.Now()
|
||||
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if slots.ToEpoch(block.Slot())+params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest > primitives.Epoch(s.CurrentSlot()) {
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
@@ -519,52 +526,60 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get KZG commitments")
|
||||
}
|
||||
existingBlobs := len(kzgCommitments)
|
||||
if existingBlobs == 0 {
|
||||
expected := len(kzgCommitments)
|
||||
if expected == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read first from db in case we have the blobs
|
||||
s.blobNotifier.Lock()
|
||||
var nc *blobNotifierChan
|
||||
var ok bool
|
||||
nc, ok = s.blobNotifier.chanForRoot[root]
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err == nil {
|
||||
if len(sidecars) >= existingBlobs {
|
||||
delete(s.blobNotifier.chanForRoot, root)
|
||||
s.blobNotifier.Unlock()
|
||||
return kzg.IsDataAvailable(kzgCommitments, sidecars)
|
||||
switch {
|
||||
case err == nil:
|
||||
if len(sidecars) >= expected {
|
||||
s.blobNotifiers.delete(root)
|
||||
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
|
||||
if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
|
||||
log.WithError(err2).Error("could not delete sidecars")
|
||||
}
|
||||
return err
|
||||
}
|
||||
logBlobSidecar(sidecars, t)
|
||||
return nil
|
||||
}
|
||||
case errors.Is(err, db.ErrNotFound):
|
||||
// If the blob sidecars haven't arrived yet, the subsequent code will wait for them.
|
||||
// Note: The system will not exit with an error in this scenario.
|
||||
default:
|
||||
log.WithError(err).Error("could not get blob sidecars from DB")
|
||||
}
|
||||
// Create the channel if it didn't exist already the index map will be
|
||||
// created later anyway
|
||||
if !ok {
|
||||
nc = &blobNotifierChan{channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
|
||||
s.blobNotifier.chanForRoot[root] = nc
|
||||
|
||||
found := map[uint64]struct{}{}
|
||||
for _, sc := range sidecars {
|
||||
found[sc.Index] = struct{}{}
|
||||
}
|
||||
// We have more commitments in the block than blobs in database
|
||||
// We sync the channel indices with the sidecars
|
||||
nc.indices = make(map[uint64]struct{})
|
||||
for _, sidecar := range sidecars {
|
||||
nc.indices[sidecar.Index] = struct{}{}
|
||||
}
|
||||
s.blobNotifier.Unlock()
|
||||
channelWrites := len(sidecars)
|
||||
nc := s.blobNotifiers.forRoot(root)
|
||||
for {
|
||||
select {
|
||||
case <-nc.channel:
|
||||
channelWrites++
|
||||
if channelWrites == existingBlobs {
|
||||
s.blobNotifier.Lock()
|
||||
delete(s.blobNotifier.chanForRoot, root)
|
||||
s.blobNotifier.Unlock()
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
return kzg.IsDataAvailable(kzgCommitments, sidecars)
|
||||
case idx := <-nc:
|
||||
found[idx] = struct{}{}
|
||||
if len(found) != expected {
|
||||
continue
|
||||
}
|
||||
s.blobNotifiers.delete(root)
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
|
||||
if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
|
||||
log.WithError(err2).Error("could not delete sidecars")
|
||||
}
|
||||
return err
|
||||
}
|
||||
logBlobSidecar(sidecars, t)
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
|
||||
}
|
||||
@@ -616,11 +631,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
return
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||
headState: headState,
|
||||
headRoot: headRoot,
|
||||
headBlock: headBlock.Block(),
|
||||
})
|
||||
s.cfg.ForkChoiceStore.RUnlock()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
@@ -235,7 +236,8 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
|
||||
// to be included(rather than the last one to be processed). This was most likely
|
||||
// done as the state cannot represent signed integers.
|
||||
finalizedEth1DepIdx := eth1DepositIndex - 1
|
||||
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx)); err != nil {
|
||||
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
|
||||
0 /* Setting a zero value as we have no access to block height */); err != nil {
|
||||
log.WithError(err).Error("could not insert finalized deposits")
|
||||
return
|
||||
}
|
||||
@@ -247,7 +249,7 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
|
||||
// to the provided eth1 deposit index.
|
||||
s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(eth1DepositIndex)) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
|
||||
|
||||
log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
|
||||
log.WithField("duration", time.Since(startTime).String()).Debugf("Finalized deposit insertion completed at index %d", finalizedEth1DepIdx)
|
||||
}
|
||||
|
||||
// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
@@ -66,7 +67,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
err := service.onBlockBatch(ctx, blks)
|
||||
err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
|
||||
require.NoError(t, err)
|
||||
jcp := service.CurrentJustifiedCheckpt()
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
@@ -96,7 +97,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks))
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
@@ -699,7 +700,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10, BlockHash: make([]byte, 32)}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(8))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
var zeroSig [96]byte
|
||||
@@ -713,8 +714,9 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
|
||||
}
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
@@ -728,7 +730,7 @@ func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10, BlockHash: make([]byte, 32)}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(8))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
var zeroSig [96]byte
|
||||
@@ -748,8 +750,9 @@ func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
|
||||
}
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
@@ -767,11 +770,11 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 7}))
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 7, BlockHash: make([]byte, 32)}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(6))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
gs2 := gs.Copy()
|
||||
assert.NoError(t, gs2.SetEth1Data(ðpb.Eth1Data{DepositCount: 15}))
|
||||
assert.NoError(t, gs2.SetEth1Data(ðpb.Eth1Data{DepositCount: 15, BlockHash: make([]byte, 32)}))
|
||||
assert.NoError(t, gs2.SetEth1DepositIndex(13))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
|
||||
var zeroSig [96]byte
|
||||
@@ -785,11 +788,11 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
|
||||
}
|
||||
// Insert 3 deposits before hand.
|
||||
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
|
||||
|
||||
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2, [32]byte{}, 0))
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
|
||||
for _, d := range deps {
|
||||
@@ -798,8 +801,9 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
|
||||
// Insert New Finalized State with higher deposit count.
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
|
||||
fDeposits = depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
fDeposits, err = depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
@@ -1098,12 +1102,15 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(4)
|
||||
var lock sync.Mutex
|
||||
go func() {
|
||||
preState, err := service.getBlockPreState(ctx, wsb1.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb1)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -1111,7 +1118,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb2)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -1119,7 +1128,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb3)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -1127,7 +1138,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb4)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
@@ -1931,7 +1944,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
rwsb, err := consensusblocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
// We use onBlockBatch here because the valid chain is missing in forkchoice
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}))
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
|
||||
// Check that the head is now VALID and the node is not optimistic
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
headRoot, err = service.HeadRoot(ctx)
|
||||
|
||||
@@ -1,22 +1,23 @@
|
||||
package blockchain
|
||||
|
||||
import fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
|
||||
// for the blocroot `root` is ready in the database
|
||||
func (s *Service) SendNewBlobEvent(root [32]byte, index uint64) {
|
||||
s.blobNotifier.Lock()
|
||||
nc, ok := s.blobNotifier.chanForRoot[root]
|
||||
if !ok {
|
||||
nc = &blobNotifierChan{indices: make(map[uint64]struct{}), channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
|
||||
s.blobNotifier.chanForRoot[root] = nc
|
||||
}
|
||||
_, ok = nc.indices[index]
|
||||
if ok {
|
||||
s.blobNotifier.Unlock()
|
||||
return
|
||||
}
|
||||
nc.indices[index] = struct{}{}
|
||||
s.blobNotifier.Unlock()
|
||||
nc.channel <- struct{}{}
|
||||
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
|
||||
s.blobNotifiers.forRoot(root) <- index
|
||||
}
|
||||
|
||||
// ReceiveBlob saves the blob to database and sends the new event
|
||||
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.BlobSidecar) error {
|
||||
if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{b}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ package blockchain
import (
"bytes"
"context"
"fmt"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
@@ -10,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -21,7 +24,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
@@ -33,15 +35,16 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
}

// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
SendNewBlobEvent([32]byte, uint64)
ReceiveBlob(context.Context, *ethpb.BlobSidecar) error
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -57,7 +60,15 @@ type SlashingReceiver interface {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block has been synced
if s.InForkchoice(blockRoot) {
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
return nil
}
receivedTime := time.Now()
s.blockBeingSynced.set(blockRoot)
defer s.blockBeingSynced.unset(blockRoot)

blockCopy, err := block.Copy()
if err != nil {
return err
@@ -181,7 +192,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()

@@ -189,7 +200,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
defer s.cfg.ForkChoiceStore.Unlock()

// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks); err != nil {
if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err
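The interface and method changes above thread a das.AvailabilityStore through ReceiveBlockBatch down to onBlockBatch. Below is a hypothetical call-site sketch: the package paths are the ones imported in this diff, but the batchReceiver interface and processBatch wrapper are invented here purely for illustration.

// Package batchimport is a hypothetical home for this sketch.
package batchimport

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
)

// batchReceiver is the slice of the BlockReceiver interface this sketch needs.
type batchReceiver interface {
	ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error
}

// processBatch forwards a verified batch together with an availability store,
// which the chain service consults for blob data availability before import.
func processBatch(ctx context.Context, r batchReceiver, batch []blocks.ROBlock, avs das.AvailabilityStore) error {
	return r.ReceiveBlockBatch(ctx, batch, avs)
}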
@@ -7,6 +7,7 @@ import (
"time"

blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -23,9 +24,10 @@ func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()

genesis, keys := util.DeterministicGenesisState(t, 64)
copiedGen := genesis.Copy()
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
blk, err := util.GenerateFullBlock(copiedGen.Copy(), keys, conf, slot)
require.NoError(t, err)
return blk
}
//params.SetupTestConfigCleanupWithLock(t)
@@ -108,6 +110,9 @@ func TestService_ReceiveBlock(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
// Hacky sleep, should use a better way to be able to resolve the race
// between event being sent out and processed.
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
@@ -119,6 +124,10 @@ func TestService_ReceiveBlock(t *testing.T) {
for _, tt := range tests {
wg.Add(1)
t.Run(tt.name, func(t *testing.T) {
defer func() {
wg.Done()
}()
genesis = genesis.Copy()
s, tr := minimalTestService(t,
WithFinalizedStateAtStartUp(genesis),
WithExitPool(voluntaryexits.NewPool()),
@@ -139,10 +148,9 @@ func TestService_ReceiveBlock(t *testing.T) {
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
require.NoError(t, err)
tt.check(t, s)
}
wg.Done()
})
}
wg.Wait()
@@ -173,6 +181,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
wg.Done()
}()
wg.Wait()
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
@@ -215,6 +224,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
@@ -231,7 +241,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb})
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -33,6 +32,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -61,7 +61,8 @@ type Service struct {
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifier *blobNotifier
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
}

// config options for the service.
@@ -69,7 +70,7 @@ type config struct {
BeaconBlockBuf int
ChainStartFetcher execution.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
DepositCache cache.DepositCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
@@ -90,14 +91,26 @@ type config struct {

var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")

type blobNotifierChan struct {
indices map[uint64]struct{}
channel chan struct{}
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
}

type blobNotifier struct {
sync.RWMutex
chanForRoot map[[32]byte]*blobNotifierChan
func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
bn.notifiers[root] = c
}
return c
}

func (bn *blobNotifierMap) delete(root [32]byte) {
bn.Lock()
defer bn.Unlock()
delete(bn.notifiers, root)
}

// NewService instantiates a new block service instance that will
@@ -111,8 +124,8 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifier{
chanForRoot: make(map[[32]byte]*blobNotifierChan),
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
}
srv := &Service{
ctx: ctx,
@@ -120,8 +133,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifier: bn,
blobNotifiers: bn,
cfg: &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -368,7 +382,7 @@ func (s *Service) startFromExecutionChain() error {
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
log.WithError(err).Error("Subscription to state forRoot failed")
return
}
}
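Two structural changes in service.go are worth noting: the Service now carries the blobNotifiers map, and the config's DepositCache field becomes the cache.DepositCache interface instead of a concrete *depositcache.DepositCache, which is what lets the EIP-4881 snapshot cache introduced further down this diff be swapped in. The sketch below shows only the general Go pattern with toy types; it is not the real cache.DepositCache interface, whose full method set is not visible in this diff.

package main

import "fmt"

// depositFetcher stands in for the cache.DepositCache interface referenced in
// the diff; the real method set is larger, so this is illustrative only.
type depositFetcher interface {
	PendingCount() int
}

type legacyCache struct{ pending int }

func (c *legacyCache) PendingCount() int { return c.pending }

type snapshotCache struct{ pending int }

func (c *snapshotCache) PendingCount() int { return c.pending }

// config mirrors the change in service.go: the field holds an interface rather
// than a concrete cache type, so either implementation can be wired in at
// startup (for example behind a feature flag).
type config struct {
	DepositCache depositFetcher
}

func main() {
	for _, cfg := range []config{
		{DepositCache: &legacyCache{pending: 2}},
		{DepositCache: &snapshotCache{pending: 5}},
	} {
		fmt.Println("pending deposits:", cfg.DepositCache.PendingCount())
	}
}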
@@ -2,6 +2,7 @@ package blockchain

import (
"context"
"sync"
"testing"

"github.com/prysmaticlabs/prysm/v4/async/event"
@@ -23,10 +24,13 @@ import (

type mockBeaconNode struct {
stateFeed *event.Feed
mu sync.Mutex
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
mbn.mu.Lock()
defer mbn.mu.Unlock()
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
@@ -17,6 +17,7 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/state:go_default_library",

@@ -16,6 +16,7 @@ import (
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -71,6 +72,8 @@ type ChainService struct {
FinalizedRoots map[[32]byte]bool
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
SyncingRoot [32]byte
Blobs []*ethpb.BlobSidecar
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -205,7 +208,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}

// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
if s.State == nil {
return ErrNilState
}
@@ -605,5 +608,13 @@ func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}

// SendNewBlobEvent mocks the same method in the chain service
func (s *ChainService) SendNewBlobEvent(_ [32]byte, _ uint64) {}
// BlockBeingSynced mocks the same method in the chain service
func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
return root == c.SyncingRoot
}

// ReceiveBlob implements the same method in the chain service
func (c *ChainService) ReceiveBlob(_ context.Context, b *ethpb.BlobSidecar) error {
c.Blobs = append(c.Blobs, b)
return nil
}
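The mock ChainService now records every sidecar passed to ReceiveBlob in its Blobs slice and answers BlockBeingSynced by comparing against SyncingRoot. A hypothetical test sketch using that mock; the test name and package are invented, and the import paths are the ones used elsewhere in this diff.

// mockusage_test.go is a hypothetical test file, not part of the diff.
package mockusage_test

import (
	"context"
	"testing"

	mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestBlobForwarding(t *testing.T) {
	chain := &mock.ChainService{}
	// Code under test would normally call ReceiveBlob; here we call it directly
	// to show that the mock simply records the sidecars it is handed.
	require.NoError(t, chain.ReceiveBlob(context.Background(), &ethpb.BlobSidecar{Index: 1}))
	require.Equal(t, 1, len(chain.Blobs))
}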
beacon-chain/cache/BUILD.bazel (vendored, 2 changes)
@@ -13,6 +13,7 @@ go_library(
"common.go",
"doc.go",
"error.go",
"interfaces.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
@@ -44,6 +45,7 @@ go_library(
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",

beacon-chain/cache/checkpoint_state.go (vendored, 4 changes)
@@ -42,7 +42,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {
// StateByCheckpoint fetches state by checkpoint. Returns true with a
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
h, err := hash.HashProto(cp)
h, err := hash.Proto(cp)
if err != nil {
return nil, err
}
@@ -62,7 +62,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has reached the max cache size limit.
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
h, err := hash.HashProto(cp)
h, err := hash.Proto(cp)
if err != nil {
return err
}
beacon-chain/cache/depositcache/BUILD.bazel (vendored, 3 changes)
@@ -13,12 +13,14 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -35,6 +37,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -11,9 +11,11 @@ import (
"sort"
"sync"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/container/trie"
@@ -30,20 +32,11 @@ var (
})
)

// DepositFetcher defines a struct which can retrieve deposit information from a store.
type DepositFetcher interface {
AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
FinalizedDeposits(ctx context.Context) *FinalizedDeposits
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}

// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
Deposits *trie.SparseMerkleTrie
MerkleTrieIndex int64
deposits *trie.SparseMerkleTrie
merkleTrieIndex int64
}

// DepositCache stores all in-memory deposit objects. This
@@ -52,7 +45,7 @@ type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*ethpb.DepositContainer
deposits []*ethpb.DepositContainer
finalizedDeposits *FinalizedDeposits
finalizedDeposits FinalizedDeposits
depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
depositsLock sync.RWMutex
}
@@ -64,13 +57,13 @@ func New() (*DepositCache, error) {
return nil, err
}

// finalizedDeposits.MerkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// Inserting the first item into the trie will set the value of the index to 0.
return &DepositCache{
pendingDeposits: []*ethpb.DepositContainer{},
deposits: []*ethpb.DepositContainer{},
depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
}, nil
}

@@ -129,14 +122,15 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
}

// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) error {
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
eth1DepositIndex int64, _ common.Hash, _ uint64) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()

depositTrie := dc.finalizedDeposits.Deposits
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
depositTrie := dc.finalizedDeposits.Deposits()
insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)

// Don't insert into finalized trie if there is no deposit to
// insert.
@@ -154,7 +148,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
return nil
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
continue
}
if d.Index > eth1DepositIndex {
@@ -169,10 +163,13 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
}
insertIndex++
}

dc.finalizedDeposits = &FinalizedDeposits{
Deposits: depositTrie,
MerkleTrieIndex: eth1DepositIndex,
tree, ok := depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return errors.New("not a sparse merkle tree")
}
dc.finalizedDeposits = FinalizedDeposits{
deposits: tree,
merkleTrieIndex: eth1DepositIndex,
}
return nil
}
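InsertFinalizedDeposits now also takes the finalized execution block hash and number; the legacy depositcache ignores them (note the blank identifiers), while the EIP-4881 cache added later in this diff forwards them to DepositTree.Finalize. A minimal sketch of the updated call, assuming only the package path that appears in this diff; with no deposits inserted the call is a no-op.

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
)

func main() {
	dc, err := depositcache.New()
	if err != nil {
		log.Fatal(err)
	}
	// The legacy cache discards the execution block hash and number, so zero
	// placeholders are passed here; the snapshot cache uses them when
	// finalizing its deposit tree.
	if err := dc.InsertFinalizedDeposits(context.Background(), 2, common.Hash{}, 0); err != nil {
		log.Fatal(err)
	}
}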
@@ -204,8 +201,6 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()

@@ -261,16 +256,16 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
}

// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposits {
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()

return &FinalizedDeposits{
Deposits: dc.finalizedDeposits.Deposits.Copy(),
MerkleTrieIndex: dc.finalizedDeposits.MerkleTrieIndex,
}
deposits: dc.finalizedDeposits.deposits.Copy(),
merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
}, nil
}

// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
@@ -281,7 +276,7 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedI
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()

if dc.finalizedDeposits == nil {
if dc.finalizedDeposits.Deposits() == nil {
return dc.allDeposits(untilBlk)
}

@@ -307,9 +302,6 @@ func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64
}

for i := untilDepositIndex; i >= 0; i-- {
if ctx.Err() != nil {
return ctx.Err()
}
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
if dc.deposits[i].Deposit.Proof == nil {
break
@@ -319,3 +311,14 @@ func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64

return nil
}

// Deposits returns the cached internal deposit tree.
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
return fd.deposits
}

// MerkleTrieIndex represents the last finalized index in
// the finalized deposit container.
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
return fd.merkleTrieIndex
}
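FinalizedDeposits now returns the cache.FinalizedDeposits interface plus an error, and callers go through the Deposits() and MerkleTrieIndex() accessors instead of exported struct fields. A small sketch of the updated read path, mirroring how the test changes below consume it; on a fresh cache the index is -1 and the trie is empty.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
)

func main() {
	dc, err := depositcache.New()
	if err != nil {
		log.Fatal(err)
	}
	// Callers now receive an interface value and use accessors rather than
	// reading exported fields on a concrete struct.
	fd, err := dc.FinalizedDeposits(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	root, err := fd.Deposits().HashTreeRoot()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("finalized up to index %d, trie root %#x\n", fd.MerkleTrieIndex(), root)
}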
@@ -7,6 +7,7 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -18,7 +19,7 @@ import (
|
||||
|
||||
const nilDepositErr = "Ignoring nil deposit insertion"
|
||||
|
||||
var _ DepositFetcher = (*DepositCache)(nil)
|
||||
var _ cache.DepositFetcher = (*DepositCache)(nil)
|
||||
|
||||
func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
@@ -416,11 +417,12 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
|
||||
Index: 3,
|
||||
})
|
||||
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
|
||||
|
||||
var deps [][]byte
|
||||
for _, d := range finalizedDeposits {
|
||||
@@ -432,7 +434,7 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
|
||||
require.NoError(t, err, "Could not generate deposit trie")
|
||||
rootA, err := generatedTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
|
||||
rootB, err := cachedDeposits.Deposits().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, rootA, rootB)
|
||||
}
|
||||
@@ -474,15 +476,16 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
Index: 2,
|
||||
}
|
||||
dc.deposits = oldFinalizedDeposits
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0))
|
||||
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
|
||||
|
||||
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex())
|
||||
|
||||
var deps [][]byte
|
||||
for _, d := range oldFinalizedDeposits {
|
||||
@@ -494,7 +497,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
require.NoError(t, err, "Could not generate deposit trie")
|
||||
rootA, err := generatedTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
|
||||
rootB, err := cachedDeposits.Deposits().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, rootA, rootB)
|
||||
}
|
||||
@@ -503,11 +506,12 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex())
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
|
||||
@@ -548,11 +552,12 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0))
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
|
||||
@@ -623,14 +628,15 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0))
|
||||
|
||||
// Reinsert finalized deposits with a lower index.
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex())
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
|
||||
@@ -640,7 +646,7 @@ func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
|
||||
finalizedDeposits := dc.finalizedDeposits
|
||||
assert.NotNil(t, finalizedDeposits)
|
||||
assert.NotNil(t, finalizedDeposits.Deposits)
|
||||
assert.Equal(t, int64(-1), finalizedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(-1), finalizedDeposits.merkleTrieIndex)
|
||||
}
|
||||
|
||||
func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
|
||||
@@ -694,7 +700,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
|
||||
},
|
||||
Index: 3,
|
||||
})
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0))
|
||||
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
|
||||
assert.Equal(t, 2, len(deps))
|
||||
@@ -751,7 +757,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
|
||||
},
|
||||
Index: 3,
|
||||
})
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0))
|
||||
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
|
||||
assert.Equal(t, 1, len(deps))
|
||||
@@ -799,41 +805,43 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Perform this in a non-sensical ordering
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0))
|
||||
|
||||
// Mimic finalized deposit trie fetch.
|
||||
fd := dc.FinalizedDeposits(context.Background())
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
|
||||
insertIndex := fd.MerkleTrieIndex + 1
|
||||
fd, err := dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), big.NewInt(14))
|
||||
insertIndex := fd.MerkleTrieIndex() + 1
|
||||
|
||||
for _, dep := range deps {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
|
||||
if err = fd.Deposits().Insert(depHash[:], int(insertIndex)); err != nil {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
insertIndex++
|
||||
}
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 14))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0))
|
||||
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 14, [32]byte{}, 0))
|
||||
|
||||
fd = dc.FinalizedDeposits(context.Background())
|
||||
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))
|
||||
insertIndex = fd.MerkleTrieIndex + 1
|
||||
fd, err = dc.FinalizedDeposits(context.Background())
|
||||
require.NoError(t, err)
|
||||
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), big.NewInt(30))
|
||||
insertIndex = fd.MerkleTrieIndex() + 1
|
||||
|
||||
for _, dep := range deps {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
|
||||
if err = fd.Deposits().Insert(depHash[:], int(insertIndex)); err != nil {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
insertIndex++
|
||||
}
|
||||
assert.Equal(t, fd.Deposits.NumOfItems(), depositTrie.NumOfItems())
|
||||
assert.Equal(t, fd.Deposits().NumOfItems(), depositTrie.NumOfItems())
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
@@ -1056,3 +1064,11 @@ func makeDepositProof() [][]byte {
|
||||
}
|
||||
return proof
|
||||
}
|
||||
|
||||
func TestEmptyTree(t *testing.T) {
|
||||
finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err)
|
||||
v, err := finalizedDepositsTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
fmt.Printf("%x", v)
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
return
}

depRoot, err := hash.HashProto(d)
depRoot, err := hash.Proto(d)
if err != nil {
log.WithError(err).Error("Could not remove deposit")
return
@@ -109,7 +109,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

idx := -1
for i, ctnr := range dc.pendingDeposits {
h, err := hash.HashProto(ctnr.Deposit)
h, err := hash.Proto(ctnr.Deposit)
if err != nil {
log.WithError(err).Error("Could not hash deposit")
continue

@@ -17,7 +17,7 @@ func TestInsertPendingDeposit_OK(t *testing.T) {
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})

assert.Equal(t, 1, len(dc.pendingDeposits), "Deposit not inserted")
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
}

func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
@@ -56,7 +56,7 @@ func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
assert.Equal(t, 1, len(dc.pendingDeposits), "Deposit unexpectedly removed")
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit unexpectedly removed")
}

func TestPendingDeposit_RoundTrip(t *testing.T) {
beacon-chain/cache/depositsnapshot/BUILD.bazel (vendored, 23 changes)
@@ -3,26 +3,37 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"deposit_fetcher.go",
|
||||
"deposit_inserter.go",
|
||||
"deposit_tree.go",
|
||||
"deposit_tree_snapshot.go",
|
||||
"merkle_tree.go",
|
||||
"zerohashes.gen.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"deposit_cache_test.go",
|
||||
"deposit_tree_snapshot_test.go",
|
||||
"merkle_tree_test.go",
|
||||
"spec_test.go",
|
||||
@@ -32,10 +43,16 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@in_gopkg_yaml_v3//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
|
||||
beacon-chain/cache/depositsnapshot/deposit_cache_test.go (new file, vendored, 1199 lines; diff suppressed because it is too large)
beacon-chain/cache/depositsnapshot/deposit_fetcher.go (new file, vendored, 304 lines)
@@ -0,0 +1,304 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/wealdtech/go-bytesutil"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacondb_pending_deposits_eip4881",
|
||||
Help: "The number of pending deposits in memory",
|
||||
})
|
||||
)
|
||||
|
||||
// Cache stores all in-memory deposit objects. This
|
||||
// stores all the deposit related data that is required by the beacon-node.
|
||||
type Cache struct {
|
||||
pendingDeposits []*ethpb.DepositContainer
|
||||
deposits []*ethpb.DepositContainer
|
||||
finalizedDeposits finalizedDepositsContainer
|
||||
depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
|
||||
depositsLock sync.RWMutex
|
||||
}
|
||||
|
||||
// finalizedDepositsContainer stores the trie of deposits that have been included
|
||||
// in the beacon state up to the latest finalized checkpoint.
|
||||
type finalizedDepositsContainer struct {
|
||||
depositTree *DepositTree
|
||||
merkleTrieIndex int64
|
||||
}
|
||||
|
||||
// New instantiates a new deposit cache
|
||||
func New() (*Cache, error) {
|
||||
finalizedDepositsTrie := NewDepositTree()
|
||||
|
||||
// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
|
||||
// Inserting the first item into the trie will set the value of the index to 0.
|
||||
return &Cache{
|
||||
pendingDeposits: []*ethpb.DepositContainer{},
|
||||
deposits: []*ethpb.DepositContainer{},
|
||||
depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
|
||||
finalizedDeposits: toFinalizedDepositsContainer(finalizedDepositsTrie, -1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AllDeposits returns a list of historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (c *Cache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
|
||||
return c.allDeposits(untilBlk)
|
||||
}
|
||||
|
||||
func (c *Cache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, ctnr := range c.deposits {
|
||||
cBlk := big.NewInt(0).SetUint64(ctnr.Eth1BlockHeight)
|
||||
if untilBlk == nil || untilBlk.Cmp(cBlk) >= 0 {
|
||||
deposits = append(deposits, ctnr.Deposit)
|
||||
}
|
||||
}
|
||||
return deposits
|
||||
}
|
||||
|
||||
// AllDepositContainers returns all historical deposit containers.
|
||||
func (c *Cache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.AllDepositContainers")
|
||||
defer span.End()
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
|
||||
// Make a shallow copy of the deposits and return that. This way, the
|
||||
// caller can safely iterate over the returned list of deposits without
|
||||
// the possibility of new deposits showing up. If we were to return the
|
||||
// list without a copy, when a new deposit is added to the cache, it
|
||||
// would also be present in the returned value. This could result in a
|
||||
// race condition if the list is being iterated over.
|
||||
//
|
||||
// It's not necessary to make a deep copy of this list because the
|
||||
// deposits in the cache should never be modified. It is still possible
|
||||
// for the caller to modify one of the underlying deposits and modify
|
||||
// the cache, but that's not a race condition. Also, a deep copy would
|
||||
// take too long and use too much memory.
|
||||
deposits := make([]*ethpb.DepositContainer, len(c.deposits))
|
||||
copy(deposits, c.deposits)
|
||||
return deposits
|
||||
}
|
||||
|
||||
// DepositByPubkey looks through historical deposits and finds one which contains
|
||||
// a certain public key within its deposit data.
|
||||
func (c *Cache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.DepositByPubkey")
|
||||
defer span.End()
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
|
||||
var deposit *ethpb.Deposit
|
||||
var blockNum *big.Int
|
||||
deps, ok := c.depositsByKey[bytesutil.ToBytes48(pubKey)]
|
||||
if !ok || len(deps) == 0 {
|
||||
return deposit, blockNum
|
||||
}
|
||||
// We always return the first deposit if a particular
|
||||
// validator key has multiple deposits assigned to
|
||||
// it.
|
||||
deposit = deps[0].Deposit
|
||||
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
|
||||
return deposit, blockNum
|
||||
}
|
||||
|
||||
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
|
||||
// root that corresponds to the latest deposit at that blockheight.
|
||||
func (c *Cache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.DepositsNumberAndRootAtHeight")
|
||||
defer span.End()
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
heightIdx := sort.Search(len(c.deposits), func(i int) bool {
|
||||
dBlkHeight := big.NewInt(0).SetUint64(c.deposits[i].Eth1BlockHeight)
|
||||
return dBlkHeight.Cmp(blockHeight) > 0
|
||||
})
|
||||
// send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
|
||||
// deposit.
|
||||
if heightIdx == 0 {
|
||||
return 0, [32]byte{}
|
||||
}
|
||||
return uint64(heightIdx), bytesutil.ToBytes32(c.deposits[heightIdx-1].DepositRoot)
|
||||
}
|
||||
|
||||
// FinalizedDeposits returns the finalized deposits trie.
|
||||
func (c *Cache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.FinalizedDeposits")
|
||||
defer span.End()
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
|
||||
tree, err := c.finalizedDeposits.depositTree.Copy()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &finalizedDepositsContainer{
|
||||
depositTree: tree,
|
||||
merkleTrieIndex: c.finalizedDeposits.merkleTrieIndex,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
|
||||
// If no block is specified then this method returns all non-finalized deposits.
|
||||
func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.NonFinalizedDeposits")
|
||||
defer span.End()
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
|
||||
if c.finalizedDeposits.depositTree == nil {
|
||||
return c.allDeposits(untilBlk)
|
||||
}
|
||||
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, d := range c.deposits {
|
||||
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
|
||||
deposits = append(deposits, d.Deposit)
|
||||
}
|
||||
}
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.PruneProofs")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(c.deposits)) {
|
||||
untilDepositIndex = int64(len(c.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
|
||||
for _, dp := range c.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
c.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
}).Debug("Ignoring nil deposit insertion")
|
||||
return
|
||||
}
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
c.pendingDeposits = append(c.pendingDeposits,
|
||||
ðpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(c.pendingDeposits))))
|
||||
}
|
||||
|
||||
// Deposits returns the cached internal deposit tree.
|
||||
func (fd *finalizedDepositsContainer) Deposits() cache.MerkleTree {
|
||||
return fd.depositTree
|
||||
}
|
||||
|
||||
// MerkleTrieIndex represents the last finalized index in
|
||||
// the finalized deposit container.
|
||||
func (fd *finalizedDepositsContainer) MerkleTrieIndex() int64 {
|
||||
return fd.merkleTrieIndex
|
||||
}
|
||||
|
||||
func toFinalizedDepositsContainer(deposits *DepositTree, index int64) finalizedDepositsContainer {
|
||||
return finalizedDepositsContainer{
|
||||
depositTree: deposits,
|
||||
merkleTrieIndex: index,
|
||||
}
|
||||
}
|
||||
|
||||
// PendingDeposits returns a list of deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all pending
|
||||
// deposits.
|
||||
func (c *Cache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.PendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
depositCntrs := c.PendingContainers(ctx, untilBlk)
|
||||
|
||||
deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
|
||||
for _, dep := range depositCntrs {
|
||||
deposits = append(deposits, dep.Deposit)
|
||||
}
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PendingContainers returns a list of deposit containers until the given block number
|
||||
// (inclusive).
|
||||
func (c *Cache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.PendingContainers")
|
||||
defer span.End()
|
||||
c.depositsLock.RLock()
|
||||
defer c.depositsLock.RUnlock()
|
||||
|
||||
depositCntrs := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
|
||||
for _, ctnr := range c.pendingDeposits {
|
||||
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
// Sort the deposits by Merkle index.
|
||||
sort.SliceStable(depositCntrs, func(i, j int) bool {
|
||||
return depositCntrs[i].Index < depositCntrs[j].Index
|
||||
})
|
||||
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
|
||||
|
||||
return depositCntrs
|
||||
}
|
||||
beacon-chain/cache/depositsnapshot/deposit_inserter.go (new file, vendored, 134 lines)
@@ -0,0 +1,134 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacondb_all_deposits_eip4881",
|
||||
Help: "The number of total deposits in memory",
|
||||
})
|
||||
log = logrus.WithField("prefix", "cache")
|
||||
)
|
||||
|
||||
// InsertDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.InsertDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
"index": index,
|
||||
"deposit root": hex.EncodeToString(depositRoot[:]),
|
||||
}).Warn("Ignoring nil deposit insertion")
|
||||
return errors.New("nil deposit inserted into the cache")
|
||||
}
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
if int(index) != len(c.deposits) {
|
||||
return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(c.deposits), index)
|
||||
}
|
||||
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
|
||||
heightIdx := sort.Search(len(c.deposits), func(i int) bool { return c.deposits[i].Index >= index })
|
||||
depCtr := ðpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
|
||||
newDeposits := append(
|
||||
[]*ethpb.DepositContainer{depCtr},
|
||||
c.deposits[heightIdx:]...)
|
||||
c.deposits = append(c.deposits[:heightIdx], newDeposits...)
|
||||
// Append the deposit to our map, in the event no deposits
|
||||
// exist for the pubkey , it is simply added to the map.
|
||||
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
|
||||
c.depositsByKey[pubkey] = append(c.depositsByKey[pubkey], depCtr)
|
||||
historicalDepositsCount.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
|
||||
func (c *Cache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.InsertDepositContainers")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
// Initialize slice if nil object provided.
|
||||
if ctrs == nil {
|
||||
ctrs = make([]*ethpb.DepositContainer, 0)
|
||||
}
|
||||
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
|
||||
c.deposits = ctrs
|
||||
for _, ctr := range ctrs {
|
||||
// Use a new value, as the reference
|
||||
// changes in the next iteration.
|
||||
newPtr := ctr
|
||||
pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
|
||||
c.depositsByKey[pKey] = append(c.depositsByKey[pKey], newPtr)
|
||||
}
|
||||
historicalDepositsCount.Add(float64(len(ctrs)))
|
||||
}
|
||||
|
||||
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
|
||||
func (c *Cache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64,
|
||||
executionHash common.Hash, executionNumber uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "Cache.InsertFinalizedDeposits")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
depositTrie := c.finalizedDeposits.depositTree
|
||||
insertIndex := int(c.finalizedDeposits.MerkleTrieIndex() + 1)
|
||||
|
||||
// Don't insert into finalized trie if there is no deposit to
|
||||
// insert.
|
||||
if len(c.deposits) == 0 {
|
||||
return nil
|
||||
}
|
||||
// In the event we have less deposits than we need to
|
||||
// finalize we finalize till the index on which we do have it.
|
||||
if len(c.deposits) <= int(eth1DepositIndex) {
|
||||
eth1DepositIndex = int64(len(c.deposits)) - 1
|
||||
}
|
||||
// If we finalize to some lower deposit index, we
|
||||
// ignore it.
|
||||
if int(eth1DepositIndex) < insertIndex {
|
||||
return nil
|
||||
}
|
||||
currIdx := int64(depositTrie.depositCount) - 1
|
||||
|
||||
// Insert deposits into deposit trie.
|
||||
for _, ctr := range c.deposits {
|
||||
if ctr.Index > currIdx && ctr.Index <= eth1DepositIndex {
|
||||
rt, err := ctr.Deposit.Data.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := depositTrie.Insert(rt[:], int(ctr.Index)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := depositTrie.Finalize(eth1DepositIndex, executionHash, executionNumber); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.finalizedDeposits = finalizedDepositsContainer{
|
||||
depositTree: depositTrie,
|
||||
merkleTrieIndex: eth1DepositIndex,
|
||||
}
|
||||
return nil
|
||||
}
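The bounds handling in InsertFinalizedDeposits is easy to get wrong, so here is a small sketch (with hypothetical names) of just that bookkeeping: the requested eth1 deposit index is clamped to the last cached deposit, and a request that does not move past the already finalized merkle trie index is a no-op.

package main

import "fmt"

// clampFinalizeIndex distils the checks above. target is the requested
// eth1DepositIndex, numDeposits is len(c.deposits), and finalizedIndex is the
// cache's current MerkleTrieIndex. ok == false means nothing should be done.
func clampFinalizeIndex(target, numDeposits, finalizedIndex int64) (int64, bool) {
	if numDeposits == 0 {
		return 0, false // no deposits to insert into the finalized trie
	}
	if numDeposits <= target {
		target = numDeposits - 1 // finalize only up to what we actually have
	}
	if target < finalizedIndex+1 {
		return 0, false // would finalize to a lower index; ignore
	}
	return target, true
}

func main() {
	fmt.Println(clampFinalizeIndex(10, 5, 2)) // 4 true
	fmt.Println(clampFinalizeIndex(1, 5, 3))  // 0 false
	fmt.Println(clampFinalizeIndex(3, 0, -1)) // 0 false
}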
138
beacon-chain/cache/depositsnapshot/deposit_tree.go
vendored
@@ -4,12 +4,14 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
protodb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -17,12 +19,10 @@ var (
|
||||
ErrEmptyExecutionBlock = errors.New("empty execution block")
|
||||
// ErrInvalidSnapshotRoot occurs when the snapshot root does not match the calculated root.
|
||||
ErrInvalidSnapshotRoot = errors.New("snapshot root is invalid")
|
||||
// ErrInvalidMixInLength occurs when the value for mix in length is 0.
|
||||
ErrInvalidMixInLength = errors.New("mixInLength should be greater than 0")
|
||||
// ErrInvalidDepositCount occurs when the deposit count is 0.
|
||||
ErrInvalidDepositCount = errors.New("deposit count should be greater than 0")
|
||||
// ErrInvalidIndex occurs when the index is less than the number of finalized deposits.
|
||||
ErrInvalidIndex = errors.New("index should be greater than finalizedDeposits - 1")
|
||||
// ErrNoDeposits occurs when the number of deposits is 0.
|
||||
ErrNoDeposits = errors.New("number of deposits should be greater than 0")
|
||||
// ErrTooManyDeposits occurs when the number of deposits exceeds the capacity of the tree.
|
||||
ErrTooManyDeposits = errors.New("number of deposits should not be greater than the capacity of the tree")
|
||||
)
|
||||
@@ -30,7 +30,7 @@ var (
|
||||
// DepositTree is the Merkle tree representation of deposits.
|
||||
type DepositTree struct {
|
||||
tree MerkleTreeNode
|
||||
mixInLength uint64 // number of deposits in the tree, reference implementation calls this mix_in_length.
|
||||
depositCount uint64 // number of deposits in the tree, reference implementation calls this mix_in_length.
|
||||
finalizedExecutionBlock executionBlock
|
||||
}
|
||||
|
||||
@@ -39,82 +39,67 @@ type executionBlock struct {
|
||||
Depth uint64
|
||||
}
|
||||
|
||||
// New creates an empty deposit tree.
|
||||
//
|
||||
//nolint:unused
|
||||
func New() *DepositTree {
|
||||
// NewDepositTree creates an empty deposit tree.
|
||||
func NewDepositTree() *DepositTree {
|
||||
var leaves [][32]byte
|
||||
merkle := create(leaves, DepositContractDepth)
|
||||
return &DepositTree{
|
||||
tree: merkle,
|
||||
mixInLength: 0,
|
||||
depositCount: 0,
|
||||
finalizedExecutionBlock: executionBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// getSnapshot returns a deposit tree snapshot.
|
||||
//
|
||||
//nolint:unused
|
||||
func (d *DepositTree) getSnapshot() (DepositTreeSnapshot, error) {
|
||||
if d.finalizedExecutionBlock == (executionBlock{}) {
|
||||
return DepositTreeSnapshot{}, ErrEmptyExecutionBlock
|
||||
}
|
||||
// GetSnapshot returns a deposit tree snapshot.
|
||||
func (d *DepositTree) GetSnapshot() (DepositTreeSnapshot, error) {
|
||||
var finalized [][32]byte
|
||||
depositCount, finalized := d.tree.GetFinalized(finalized)
|
||||
return fromTreeParts(finalized, depositCount, d.finalizedExecutionBlock)
|
||||
}
|
||||
|
||||
// fromSnapshot returns a deposit tree from a deposit tree snapshot.
|
||||
//
|
||||
//nolint:unused
|
||||
func fromSnapshot(snapshot DepositTreeSnapshot) (DepositTree, error) {
|
||||
func fromSnapshot(snapshot DepositTreeSnapshot) (*DepositTree, error) {
|
||||
root, err := snapshot.CalculateRoot()
|
||||
if err != nil {
|
||||
return DepositTree{}, err
|
||||
return nil, err
|
||||
}
|
||||
if snapshot.depositRoot != root {
|
||||
return DepositTree{}, ErrInvalidSnapshotRoot
|
||||
return nil, ErrInvalidSnapshotRoot
|
||||
}
|
||||
if snapshot.depositCount >= math.PowerOf2(uint64(DepositContractDepth)) {
|
||||
return DepositTree{}, ErrTooManyDeposits
|
||||
return nil, ErrTooManyDeposits
|
||||
}
|
||||
tree, err := fromSnapshotParts(snapshot.finalized, snapshot.depositCount, DepositContractDepth)
|
||||
if err != nil {
|
||||
return DepositTree{}, err
|
||||
return nil, err
|
||||
}
|
||||
if snapshot.depositCount == 0 {
|
||||
return DepositTree{}, ErrNoDeposits
|
||||
}
|
||||
return DepositTree{
|
||||
return &DepositTree{
|
||||
tree: tree,
|
||||
mixInLength: snapshot.depositCount,
|
||||
depositCount: snapshot.depositCount,
|
||||
finalizedExecutionBlock: snapshot.executionBlock,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// finalize marks a deposit as finalized.
|
||||
//
|
||||
//nolint:unused
|
||||
func (d *DepositTree) finalize(eth1data *eth.Eth1Data, executionBlockHeight uint64) error {
|
||||
// Finalize marks a deposit as finalized.
|
||||
func (d *DepositTree) Finalize(eth1DepositIndex int64, executionHash common.Hash, executionNumber uint64) error {
|
||||
var blockHash [32]byte
|
||||
copy(blockHash[:], eth1data.BlockHash)
|
||||
copy(blockHash[:], executionHash[:])
|
||||
d.finalizedExecutionBlock = executionBlock{
|
||||
Hash: blockHash,
|
||||
Depth: executionBlockHeight,
|
||||
Depth: executionNumber,
|
||||
}
|
||||
_, err := d.tree.Finalize(eth1data.DepositCount, DepositContractDepth)
|
||||
depositCount := uint64(eth1DepositIndex + 1)
|
||||
_, err := d.tree.Finalize(depositCount, DepositContractDepth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getProof returns the Deposit tree proof.
|
||||
//
|
||||
//nolint:unused
|
||||
// getProof returns the deposit tree proof.
|
||||
func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
|
||||
if d.mixInLength <= 0 {
|
||||
return [32]byte{}, nil, ErrInvalidMixInLength
|
||||
if d.depositCount <= 0 {
|
||||
return [32]byte{}, nil, ErrInvalidDepositCount
|
||||
}
|
||||
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
|
||||
if finalizedDeposits != 0 {
|
||||
@@ -125,28 +110,81 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
|
||||
}
|
||||
leaf, proof := generateProof(d.tree, index, DepositContractDepth)
|
||||
var mixInLength [32]byte
|
||||
copy(mixInLength[:], bytesutil.Uint64ToBytesLittleEndian32(d.mixInLength))
|
||||
copy(mixInLength[:], bytesutil.Uint64ToBytesLittleEndian32(d.depositCount))
|
||||
proof = append(proof, mixInLength)
|
||||
return leaf, proof, nil
|
||||
}
|
||||
|
||||
// getRoot returns the root of the deposit tree.
|
||||
//
|
||||
//nolint:unused
|
||||
func (d *DepositTree) getRoot() [32]byte {
|
||||
var enc [32]byte
|
||||
binary.LittleEndian.PutUint64(enc[:], d.depositCount)
|
||||
|
||||
root := d.tree.GetRoot()
|
||||
return sha256.Sum256(append(root[:], bytesutil.Uint64ToBytesLittleEndian32(d.mixInLength)...))
|
||||
return hash.Hash(append(root[:], enc[:]...))
|
||||
}
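getRoot mixes the deposit count into the Merkle root, matching the deposit contract's get_deposit_root. As a sanity check, the empty-tree root asserted later in TestEmptyTree (d70a2347...e27e5e) can be recomputed directly from zero hashes and a zero count, as in this sketch:

package main

import (
	"crypto/sha256"
	"fmt"
)

// Recompute the empty deposit tree root: hash 32 levels of zero hashes
// together, then mix in a deposit count of zero (32 zero bytes, little-endian).
func main() {
	const depth = 32 // DepositContractDepth
	node := [32]byte{}
	for i := 0; i < depth; i++ {
		node = sha256.Sum256(append(node[:], node[:]...))
	}
	var count [32]byte // uint64(0) encoded little-endian and zero-padded
	root := sha256.Sum256(append(node[:], count[:]...))
	// Expected: d70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e
	fmt.Printf("%x\n", root)
}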
|
||||
|
||||
// pushLeaf adds a new leaf to the tree.
|
||||
//
|
||||
//nolint:unused
|
||||
func (d *DepositTree) pushLeaf(leaf [32]byte) error {
|
||||
var err error
|
||||
d.tree, err = d.tree.PushLeaf(leaf, DepositContractDepth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.mixInLength++
|
||||
d.depositCount++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Insert is defined as part of MerkleTree interface and adds a new leaf to the tree.
|
||||
func (d *DepositTree) Insert(item []byte, _ int) error {
|
||||
var leaf [32]byte
|
||||
copy(leaf[:], item[:32])
|
||||
return d.pushLeaf(leaf)
|
||||
}
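With Insert, HashTreeRoot, NumOfItems and MerkleProof exported, the tree can be driven through the generic MerkleTree interface added in beacon-chain/cache. A hedged usage sketch follows; the import path is inferred from the file paths in this diff and the leaf values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
)

func main() {
	tree := depositsnapshot.NewDepositTree()
	for i := 0; i < 2; i++ {
		leaf := make([]byte, 32)
		leaf[0] = byte(i + 1) // arbitrary placeholder deposit data roots
		if err := tree.Insert(leaf, i); err != nil {
			log.Fatal(err)
		}
	}
	root, err := tree.HashTreeRoot()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d deposits, root %x\n", tree.NumOfItems(), root)
}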
|
||||
|
||||
// HashTreeRoot is defined as part of MerkleTree interface and calculates the hash tree root.
|
||||
func (d *DepositTree) HashTreeRoot() ([32]byte, error) {
|
||||
root := d.getRoot()
|
||||
if root == [32]byte{} {
|
||||
return [32]byte{}, errors.New("could not retrieve hash tree root")
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// NumOfItems is defined as part of MerkleTree interface and returns the number of deposits in the tree.
|
||||
func (d *DepositTree) NumOfItems() int {
|
||||
return int(d.depositCount)
|
||||
}
|
||||
|
||||
// MerkleProof is defined as part of MerkleTree interface and generates a merkle proof.
|
||||
func (d *DepositTree) MerkleProof(index int) ([][]byte, error) {
|
||||
_, proof, err := d.getProof(uint64(index))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
byteSlices := make([][]byte, len(proof))
|
||||
for i, p := range proof {
|
||||
copied := p
|
||||
byteSlices[i] = copied[:]
|
||||
}
|
||||
return byteSlices, nil
|
||||
}
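A proof returned by MerkleProof carries 32 sibling hashes plus one trailing element holding the little-endian deposit count, because the root itself is count-mixed. Verification therefore folds the branch and then mixes in that final element. The sketch below (verifyDepositProof and its single-leaf demo are hypothetical, not part of the package) shows that check end to end.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// verifyDepositProof folds a 33-element proof: the first 32 elements are tree
// siblings, the last is the mixed-in deposit count.
func verifyDepositProof(leaf [32]byte, proof [][]byte, index uint64, root [32]byte) bool {
	if len(proof) != 33 {
		return false
	}
	node := leaf
	for i := 0; i < 32; i++ {
		var buf []byte
		if (index>>uint(i))&1 == 1 {
			buf = append(append(buf, proof[i]...), node[:]...)
		} else {
			buf = append(append(buf, node[:]...), proof[i]...)
		}
		node = sha256.Sum256(buf)
	}
	node = sha256.Sum256(append(node[:], proof[32]...))
	return node == root
}

func main() {
	// Build a depth-32 tree holding a single leaf at index 0 and count 1,
	// computing the sibling zero hashes and the count-mixed root by hand.
	leaf := [32]byte{0xde, 0xad}
	proof := make([][]byte, 33)
	zero := [32]byte{}
	node := leaf
	for i := 0; i < 32; i++ {
		proof[i] = append([]byte{}, zero[:]...)           // sibling at level i
		node = sha256.Sum256(append(node[:], zero[:]...)) // climb one level
		zero = sha256.Sum256(append(zero[:], zero[:]...)) // next zero hash
	}
	var count [32]byte
	binary.LittleEndian.PutUint64(count[:], 1)
	proof[32] = count[:]
	root := sha256.Sum256(append(node[:], count[:]...))
	fmt.Println(verifyDepositProof(leaf, proof, 0, root)) // true
}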
|
||||
|
||||
// Copy performs a deep copy of the tree.
|
||||
func (d *DepositTree) Copy() (*DepositTree, error) {
|
||||
snapshot, err := d.GetSnapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fromSnapshot(snapshot)
|
||||
}
|
||||
|
||||
// ToProto returns a proto object of the deposit snapshot of
|
||||
// the tree.
|
||||
func (d *DepositTree) ToProto() (*protodb.DepositSnapshot, error) {
|
||||
snapshot, err := d.GetSnapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snapshot.ToProto(), nil
|
||||
}
|
||||
|
||||
@@ -1,21 +1,13 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
protodb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrZeroIndex occurs when the value of index is 0.
|
||||
ErrZeroIndex = errors.New("index should be greater than 0")
|
||||
)
|
||||
|
||||
// DepositTreeSnapshot represents the data used to create a
|
||||
// deposit tree given a snapshot.
|
||||
//
|
||||
//nolint:unused
|
||||
// DepositTreeSnapshot represents the data used to create a deposit tree given a snapshot.
|
||||
type DepositTreeSnapshot struct {
|
||||
finalized [][32]byte
|
||||
depositRoot [32]byte
|
||||
@@ -27,29 +19,27 @@ type DepositTreeSnapshot struct {
|
||||
func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
|
||||
size := ds.depositCount
|
||||
index := len(ds.finalized)
|
||||
root := Zerohashes[0]
|
||||
root := trie.ZeroHashes[0]
|
||||
for i := 0; i < DepositContractDepth; i++ {
|
||||
if (size & 1) == 1 {
|
||||
if index == 0 {
|
||||
return [32]byte{}, ErrZeroIndex
|
||||
break
|
||||
}
|
||||
index--
|
||||
root = sha256.Sum256(append(ds.finalized[index][:], root[:]...))
|
||||
root = hash.Hash(append(ds.finalized[index][:], root[:]...))
|
||||
} else {
|
||||
root = sha256.Sum256(append(root[:], Zerohashes[i][:]...))
|
||||
root = hash.Hash(append(root[:], trie.ZeroHashes[i][:]...))
|
||||
}
|
||||
size >>= 1
|
||||
}
|
||||
return sha256.Sum256(append(root[:], bytesutil.Uint64ToBytesLittleEndian32(ds.depositCount)...)), nil
|
||||
return hash.Hash(append(root[:], bytesutil.Uint64ToBytesLittleEndian32(ds.depositCount)...)), nil
|
||||
}
|
||||
|
||||
// fromTreeParts constructs the deposit tree from pre-existing data.
|
||||
//
|
||||
//nolint:unused
|
||||
func fromTreeParts(finalised [][32]byte, depositCount uint64, executionBlock executionBlock) (DepositTreeSnapshot, error) {
|
||||
snapshot := DepositTreeSnapshot{
|
||||
finalized: finalised,
|
||||
depositRoot: Zerohashes[0],
|
||||
depositRoot: trie.ZeroHashes[0],
|
||||
depositCount: depositCount,
|
||||
executionBlock: executionBlock,
|
||||
}
|
||||
@@ -60,3 +50,36 @@ func fromTreeParts(finalised [][32]byte, depositCount uint64, executionBlock exe
|
||||
snapshot.depositRoot = root
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
// ToProto converts the underlying trie into its corresponding proto object.
|
||||
func (ds *DepositTreeSnapshot) ToProto() *protodb.DepositSnapshot {
|
||||
tree := &protodb.DepositSnapshot{
|
||||
Finalized: make([][]byte, len(ds.finalized)),
|
||||
DepositRoot: bytesutil.SafeCopyBytes(ds.depositRoot[:]),
|
||||
DepositCount: ds.depositCount,
|
||||
ExecutionHash: bytesutil.SafeCopyBytes(ds.executionBlock.Hash[:]),
|
||||
ExecutionDepth: ds.executionBlock.Depth,
|
||||
}
|
||||
for i := range ds.finalized {
|
||||
tree.Finalized[i] = bytesutil.SafeCopyBytes(ds.finalized[i][:])
|
||||
}
|
||||
return tree
|
||||
}
|
||||
|
||||
// DepositTreeFromSnapshotProto generates a deposit tree object from a provided snapshot.
|
||||
func DepositTreeFromSnapshotProto(snapshotProto *protodb.DepositSnapshot) (*DepositTree, error) {
|
||||
finalized := make([][32]byte, len(snapshotProto.Finalized))
|
||||
for i := range snapshotProto.Finalized {
|
||||
finalized[i] = bytesutil.ToBytes32(snapshotProto.Finalized[i])
|
||||
}
|
||||
snapshot := DepositTreeSnapshot{
|
||||
finalized: finalized,
|
||||
depositRoot: bytesutil.ToBytes32(snapshotProto.DepositRoot),
|
||||
depositCount: snapshotProto.DepositCount,
|
||||
executionBlock: executionBlock{
|
||||
Hash: bytesutil.ToBytes32(snapshotProto.ExecutionHash),
|
||||
Depth: snapshotProto.ExecutionDepth,
|
||||
},
|
||||
}
|
||||
return fromSnapshot(snapshot)
|
||||
}
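ToProto and DepositTreeFromSnapshotProto give a serialization round trip for the finalized part of the tree. A hedged sketch of how a caller might check that round trip; the import path is inferred from the file paths above and roundTripCheck is a hypothetical helper.

package example

import (
	"errors"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
)

// roundTripCheck serializes a finalized deposit tree to its proto snapshot,
// rebuilds a tree from that proto, and confirms both report the same root.
func roundTripCheck(tree *depositsnapshot.DepositTree) error {
	snap, err := tree.ToProto()
	if err != nil {
		return err
	}
	rebuilt, err := depositsnapshot.DepositTreeFromSnapshotProto(snap)
	if err != nil {
		return err
	}
	want, err := tree.HashTreeRoot()
	if err != nil {
		return err
	}
	got, err := rebuilt.HashTreeRoot()
	if err != nil {
		return err
	}
	if want != got {
		return errors.New("roots differ after proto snapshot round trip")
	}
	return nil
}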
|
||||
|
||||
@@ -3,6 +3,7 @@ package depositsnapshot
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
)
|
||||
@@ -58,9 +59,8 @@ func create(leaves [][32]byte, depth uint64) MerkleTreeNode {
|
||||
}
|
||||
|
||||
// fromSnapshotParts creates a new Merkle tree from a list of finalized leaves, number of deposits and specified depth.
|
||||
//
|
||||
//nolint:unused
|
||||
func fromSnapshotParts(finalized [][32]byte, deposits uint64, level uint64) (_ MerkleTreeNode, err error) {
|
||||
func fromSnapshotParts(finalized [][32]byte, deposits uint64, level uint64) (MerkleTreeNode, error) {
|
||||
var err error
|
||||
if len(finalized) < 1 || deposits == 0 {
|
||||
return &ZeroNode{
|
||||
depth: level,
|
||||
@@ -96,8 +96,6 @@ func fromSnapshotParts(finalized [][32]byte, deposits uint64, level uint64) (_ M
|
||||
}
|
||||
|
||||
// generateProof returns a merkle proof and root
|
||||
//
|
||||
//nolint:unused
|
||||
func generateProof(tree MerkleTreeNode, index uint64, depth uint64) ([32]byte, [][32]byte) {
|
||||
var proof [][32]byte
|
||||
node := tree
|
||||
@@ -219,7 +217,8 @@ func (n *InnerNode) IsFull() bool {
|
||||
}
|
||||
|
||||
// Finalize marks deposits of the Merkle tree as finalized.
|
||||
func (n *InnerNode) Finalize(depositsToFinalize uint64, depth uint64) (_ MerkleTreeNode, err error) {
|
||||
func (n *InnerNode) Finalize(depositsToFinalize uint64, depth uint64) (MerkleTreeNode, error) {
|
||||
var err error
|
||||
deposits := math.PowerOf2(depth)
|
||||
if deposits <= depositsToFinalize {
|
||||
return &FinalizedNode{deposits, n.GetRoot()}, nil
|
||||
@@ -286,9 +285,9 @@ type ZeroNode struct {
|
||||
// GetRoot returns the root of the Merkle tree.
|
||||
func (z *ZeroNode) GetRoot() [32]byte {
|
||||
if z.depth == DepositContractDepth {
|
||||
return hash.Hash(append(Zerohashes[z.depth-1][:], Zerohashes[z.depth-1][:]...))
|
||||
return hash.Hash(append(trie.ZeroHashes[z.depth-1][:], trie.ZeroHashes[z.depth-1][:]...))
|
||||
}
|
||||
return Zerohashes[z.depth]
|
||||
return trie.ZeroHashes[z.depth]
|
||||
}
|
||||
|
||||
// IsFull returns whether there is space left for deposits.
|
||||
@@ -300,7 +299,7 @@ func (_ *ZeroNode) IsFull() bool {
|
||||
|
||||
// Finalize marks deposits of the Merkle tree as finalized.
|
||||
func (_ *ZeroNode) Finalize(depositsToFinalize uint64, depth uint64) (MerkleTreeNode, error) {
|
||||
return nil, nil
|
||||
return &ZeroNode{}, nil
|
||||
}
|
||||
|
||||
// GetFinalized returns a list of hashes of all the finalized nodes and the number of deposits.
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
@@ -60,45 +62,54 @@ func Test_fromSnapshotParts(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
finalized [][32]byte
|
||||
deposits uint64
|
||||
level uint64
|
||||
want MerkleTreeNode
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
finalized: nil,
|
||||
deposits: 0,
|
||||
level: 0,
|
||||
want: &ZeroNode{},
|
||||
},
|
||||
{
|
||||
name: "single finalized node",
|
||||
finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
|
||||
deposits: 1,
|
||||
level: 0,
|
||||
want: &FinalizedNode{
|
||||
depositCount: 1,
|
||||
hash: [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple deposits and 1 Finalized",
|
||||
finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
|
||||
deposits: 2,
|
||||
level: 4,
|
||||
want: &InnerNode{
|
||||
left: &InnerNode{&InnerNode{&FinalizedNode{depositCount: 2, hash: hexString(t, fmt.Sprintf("%064d", 0))}, &ZeroNode{1}}, &ZeroNode{2}},
|
||||
right: &ZeroNode{3},
|
||||
},
|
||||
name: "multiple deposits and multiple Finalized",
|
||||
finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 1)), hexString(t, fmt.Sprintf("%064d", 2))},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tree, err := fromSnapshotParts(tt.finalized, tt.deposits, tt.level)
|
||||
require.NoError(t, err)
|
||||
if got := tree; !reflect.DeepEqual(got, tt.want) {
|
||||
require.DeepEqual(t, tt.want, got)
|
||||
test := NewDepositTree()
|
||||
for _, leaf := range tt.finalized {
|
||||
err := test.pushLeaf(leaf)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
got, err := test.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
transformed := make([][]byte, len(tt.finalized))
|
||||
for i := 0; i < len(tt.finalized); i++ {
|
||||
transformed[i] = bytesutil.SafeCopyBytes(tt.finalized[i][:])
|
||||
}
|
||||
generatedTrie, err := trie.GenerateTrieFromItems(transformed, 32)
|
||||
require.NoError(t, err)
|
||||
|
||||
want, err := generatedTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, want, got)
|
||||
|
||||
// Test finalization
|
||||
for i := 0; i < len(tt.finalized); i++ {
|
||||
err = test.Finalize(int64(i), tt.finalized[i], 0)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
sShot, err := test.GetSnapshot()
|
||||
require.NoError(t, err)
|
||||
got, err = sShot.CalculateRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(sShot.finalized))
|
||||
require.Equal(t, want, got)
|
||||
|
||||
// Build from the snapshot once more
|
||||
recovered, err := fromSnapshot(sShot)
|
||||
require.NoError(t, err)
|
||||
got, err = recovered.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -125,7 +136,7 @@ func Test_generateProof(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
tree := New()
|
||||
tree := NewDepositTree()
|
||||
for _, c := range testCases[:tt.leaves] {
|
||||
err = tree.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
93
beacon-chain/cache/depositsnapshot/spec_test.go
vendored
@@ -1,16 +1,18 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/io/file"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
@@ -176,6 +178,9 @@ func readTestCases() ([]testCase, error) {
|
||||
if err != nil {
|
||||
return []testCase{}, err
|
||||
}
|
||||
if len(testCases) == 0 {
|
||||
return nil, errors.New("no test cases found")
|
||||
}
|
||||
return testCases, nil
|
||||
}
|
||||
}
|
||||
@@ -183,11 +188,8 @@ func readTestCases() ([]testCase, error) {
|
||||
}
|
||||
|
||||
func TestRead(t *testing.T) {
|
||||
tcs, err := readTestCases()
|
||||
_, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, tc := range tcs {
|
||||
t.Log(tc)
|
||||
}
|
||||
}
|
||||
|
||||
func hexStringToByteArray(s string) (b [32]byte, err error) {
|
||||
@@ -222,9 +224,9 @@ func merkleRootFromBranch(leaf [32]byte, branch [][32]byte, index uint64) [32]by
|
||||
for i, l := range branch {
|
||||
ithBit := (index >> i) & 0x1
|
||||
if ithBit == 1 {
|
||||
root = sha256.Sum256(append(l[:], root[:]...))
|
||||
root = hash.Hash(append(l[:], root[:]...))
|
||||
} else {
|
||||
root = sha256.Sum256(append(root[:], l[:]...))
|
||||
root = hash.Hash(append(root[:], l[:]...))
|
||||
}
|
||||
}
|
||||
return root
|
||||
@@ -250,11 +252,11 @@ func cloneFromSnapshot(t *testing.T, snapshot DepositTreeSnapshot, testCases []t
|
||||
err = cp.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return &cp
|
||||
return cp
|
||||
}
|
||||
|
||||
func TestDepositCases(t *testing.T) {
|
||||
tree := New()
|
||||
tree := NewDepositTree()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases {
|
||||
@@ -263,8 +265,35 @@ func TestDepositCases(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
DepositDataRoot [32]byte
|
||||
}
|
||||
|
||||
func TestRootEquivalence(t *testing.T) {
|
||||
var err error
|
||||
tree := NewDepositTree()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
|
||||
transformed := make([][]byte, len(testCases[:128]))
|
||||
for i, c := range testCases[:128] {
|
||||
err = tree.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
transformed[i] = bytesutil.SafeCopyBytes(c.DepositDataRoot[:])
|
||||
}
|
||||
originalRoot, err := tree.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
generatedTrie, err := trie.GenerateTrieFromItems(transformed, 32)
|
||||
require.NoError(t, err)
|
||||
|
||||
rootA, err := generatedTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, rootA, originalRoot)
|
||||
}
|
||||
|
||||
func TestFinalization(t *testing.T) {
|
||||
tree := New()
|
||||
tree := NewDepositTree()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases[:128] {
|
||||
@@ -273,15 +302,11 @@ func TestFinalization(t *testing.T) {
|
||||
}
|
||||
originalRoot := tree.getRoot()
|
||||
require.DeepEqual(t, testCases[127].Eth1Data.DepositRoot, originalRoot)
|
||||
err = tree.finalize(ð.Eth1Data{
|
||||
DepositRoot: testCases[100].Eth1Data.DepositRoot[:],
|
||||
DepositCount: testCases[100].Eth1Data.DepositCount,
|
||||
BlockHash: testCases[100].Eth1Data.BlockHash[:],
|
||||
}, testCases[100].BlockHeight)
|
||||
err = tree.Finalize(int64(testCases[100].Eth1Data.DepositCount-1), testCases[100].Eth1Data.BlockHash, testCases[100].BlockHeight)
|
||||
require.NoError(t, err)
|
||||
// ensure finalization doesn't change root
|
||||
require.Equal(t, tree.getRoot(), originalRoot)
|
||||
snapshotData, err := tree.getSnapshot()
|
||||
snapshotData, err := tree.GetSnapshot()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, testCases[100].Snapshot.DepositTreeSnapshot, snapshotData)
|
||||
// create a copy of the tree from a snapshot by replaying
|
||||
@@ -290,20 +315,16 @@ func TestFinalization(t *testing.T) {
|
||||
// ensure original and copy have the same root
|
||||
require.Equal(t, tree.getRoot(), cp.getRoot())
|
||||
// finalize original again to check double finalization
|
||||
err = tree.finalize(ð.Eth1Data{
|
||||
DepositRoot: testCases[105].Eth1Data.DepositRoot[:],
|
||||
DepositCount: testCases[105].Eth1Data.DepositCount,
|
||||
BlockHash: testCases[105].Eth1Data.BlockHash[:],
|
||||
}, testCases[105].BlockHeight)
|
||||
err = tree.Finalize(int64(testCases[105].Eth1Data.DepositCount-1), testCases[105].Eth1Data.BlockHash, testCases[105].BlockHeight)
|
||||
require.NoError(t, err)
|
||||
// root should still be the same
|
||||
require.Equal(t, originalRoot, tree.getRoot())
|
||||
// create a copy of the tree by taking a snapshot again
|
||||
snapshotData, err = tree.getSnapshot()
|
||||
snapshotData, err = tree.GetSnapshot()
|
||||
require.NoError(t, err)
|
||||
cp = cloneFromSnapshot(t, snapshotData, testCases[106:128])
|
||||
// create a copy of the tree by replaying ALL deposits from nothing
|
||||
fullTreeCopy := New()
|
||||
fullTreeCopy := NewDepositTree()
|
||||
for _, c := range testCases[:128] {
|
||||
err = fullTreeCopy.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
@@ -315,7 +336,7 @@ func TestFinalization(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSnapshotCases(t *testing.T) {
|
||||
tree := New()
|
||||
tree := NewDepositTree()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases {
|
||||
@@ -323,33 +344,29 @@ func TestSnapshotCases(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
for _, c := range testCases {
|
||||
err = tree.finalize(ð.Eth1Data{
|
||||
DepositRoot: c.Eth1Data.DepositRoot[:],
|
||||
DepositCount: c.Eth1Data.DepositCount,
|
||||
BlockHash: c.Eth1Data.BlockHash[:],
|
||||
}, c.BlockHeight)
|
||||
err = tree.Finalize(int64(c.Eth1Data.DepositCount-1), c.Eth1Data.BlockHash, c.BlockHeight)
|
||||
require.NoError(t, err)
|
||||
s, err := tree.getSnapshot()
|
||||
s, err := tree.GetSnapshot()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, c.Snapshot.DepositTreeSnapshot, s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyTreeSnapshot(t *testing.T) {
|
||||
_, err := New().getSnapshot()
|
||||
require.ErrorContains(t, "empty execution block", err)
|
||||
}
|
||||
|
||||
func TestInvalidSnapshot(t *testing.T) {
|
||||
invalidSnapshot := DepositTreeSnapshot{
|
||||
finalized: nil,
|
||||
depositRoot: Zerohashes[0],
|
||||
depositRoot: trie.ZeroHashes[0],
|
||||
depositCount: 0,
|
||||
executionBlock: executionBlock{
|
||||
Hash: Zerohashes[0],
|
||||
Hash: trie.ZeroHashes[0],
|
||||
Depth: 0,
|
||||
},
|
||||
}
|
||||
_, err := fromSnapshot(invalidSnapshot)
|
||||
require.ErrorContains(t, "snapshot root is invalid", err)
|
||||
}
|
||||
|
||||
func TestEmptyTree(t *testing.T) {
|
||||
tree := NewDepositTree()
|
||||
require.Equal(t, fmt.Sprintf("%x", tree.getRoot()), "d70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e")
|
||||
}
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
// Code generated by gen_zerohashes. DO NOT EDIT.
|
||||
package depositsnapshot
|
||||
|
||||
var Zerohashes = [][32]byte{
|
||||
// 0000000000000000000000000000000000000000000000000000000000000000
|
||||
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
// f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b
|
||||
{245, 165, 253, 66, 209, 106, 32, 48, 39, 152, 239, 110, 211, 9, 151, 155, 67, 0, 61, 35, 32, 217, 240, 232, 234, 152, 49, 169, 39, 89, 251, 75},
|
||||
// db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71
|
||||
{219, 86, 17, 78, 0, 253, 212, 193, 248, 92, 137, 43, 243, 90, 201, 168, 146, 137, 170, 236, 177, 235, 208, 169, 108, 222, 96, 106, 116, 139, 93, 113},
|
||||
// c78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c
|
||||
{199, 128, 9, 253, 240, 127, 197, 106, 17, 241, 34, 55, 6, 88, 163, 83, 170, 165, 66, 237, 99, 228, 76, 75, 193, 95, 244, 205, 16, 90, 179, 60},
|
||||
// 536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c
|
||||
{83, 109, 152, 131, 127, 45, 209, 101, 165, 93, 94, 234, 233, 20, 133, 149, 68, 114, 213, 111, 36, 109, 242, 86, 191, 60, 174, 25, 53, 42, 18, 60},
|
||||
// 9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30
|
||||
{158, 253, 224, 82, 170, 21, 66, 159, 174, 5, 186, 212, 208, 177, 215, 198, 77, 166, 77, 3, 215, 161, 133, 74, 88, 140, 44, 184, 67, 12, 13, 48},
|
||||
// d88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1
|
||||
{216, 141, 223, 238, 212, 0, 168, 117, 85, 150, 178, 25, 66, 193, 73, 126, 17, 76, 48, 46, 97, 24, 41, 15, 145, 230, 119, 41, 118, 4, 31, 161},
|
||||
// 87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c
|
||||
{135, 235, 13, 219, 165, 126, 53, 246, 210, 134, 103, 56, 2, 164, 175, 89, 117, 226, 37, 6, 199, 207, 76, 100, 187, 107, 229, 238, 17, 82, 127, 44},
|
||||
// 26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193
|
||||
{38, 132, 100, 118, 253, 95, 197, 74, 93, 67, 56, 81, 103, 201, 81, 68, 242, 100, 63, 83, 60, 200, 91, 185, 209, 107, 120, 47, 141, 125, 177, 147},
|
||||
// 506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1
|
||||
{80, 109, 134, 88, 45, 37, 36, 5, 184, 64, 1, 135, 146, 202, 210, 191, 18, 89, 241, 239, 90, 165, 248, 135, 225, 60, 178, 240, 9, 79, 81, 225},
|
||||
// ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b
|
||||
{255, 255, 10, 215, 230, 89, 119, 47, 149, 52, 193, 149, 200, 21, 239, 196, 1, 78, 241, 225, 218, 237, 68, 4, 192, 99, 133, 209, 17, 146, 233, 43},
|
||||
// 6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220
|
||||
{108, 240, 65, 39, 219, 5, 68, 28, 216, 51, 16, 122, 82, 190, 133, 40, 104, 137, 14, 67, 23, 230, 160, 42, 180, 118, 131, 170, 117, 150, 66, 32},
|
||||
// b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f
|
||||
{183, 208, 95, 135, 95, 20, 0, 39, 239, 81, 24, 162, 36, 123, 187, 132, 206, 143, 47, 15, 17, 35, 98, 48, 133, 218, 247, 150, 12, 50, 159, 95},
|
||||
// df6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e
|
||||
{223, 106, 245, 245, 187, 219, 107, 233, 239, 138, 166, 24, 228, 191, 128, 115, 150, 8, 103, 23, 30, 41, 103, 111, 139, 40, 77, 234, 106, 8, 168, 94},
|
||||
// b58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784
|
||||
{181, 141, 144, 15, 94, 24, 46, 60, 80, 239, 116, 150, 158, 161, 108, 119, 38, 197, 73, 117, 124, 194, 53, 35, 195, 105, 88, 125, 167, 41, 55, 132},
|
||||
// d49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb
|
||||
{212, 154, 117, 2, 255, 207, 176, 52, 11, 29, 120, 133, 104, 133, 0, 202, 48, 129, 97, 167, 249, 107, 98, 223, 157, 8, 59, 113, 252, 200, 242, 187},
|
||||
// 8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb
|
||||
{143, 230, 177, 104, 146, 86, 192, 211, 133, 244, 47, 91, 190, 32, 39, 162, 44, 25, 150, 225, 16, 186, 151, 193, 113, 211, 229, 148, 141, 233, 43, 235},
|
||||
// 8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab
|
||||
{141, 13, 99, 195, 158, 186, 222, 133, 9, 224, 174, 60, 156, 56, 118, 251, 95, 161, 18, 190, 24, 249, 5, 236, 172, 254, 203, 146, 5, 118, 3, 171},
|
||||
// 95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4
|
||||
{149, 238, 200, 178, 229, 65, 202, 212, 233, 29, 227, 131, 133, 242, 224, 70, 97, 159, 84, 73, 108, 35, 130, 203, 108, 172, 213, 185, 140, 38, 245, 164},
|
||||
// f893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f
|
||||
{248, 147, 233, 8, 145, 119, 117, 182, 43, 255, 35, 41, 77, 187, 227, 161, 205, 142, 108, 193, 195, 91, 72, 1, 136, 123, 100, 106, 111, 129, 241, 127},
|
||||
// cddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa
|
||||
{205, 219, 167, 181, 146, 227, 19, 51, 147, 193, 97, 148, 250, 199, 67, 26, 191, 47, 84, 133, 237, 113, 29, 178, 130, 24, 60, 129, 158, 8, 235, 170},
|
||||
// 8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c
|
||||
{138, 141, 127, 227, 175, 140, 170, 8, 90, 118, 57, 168, 50, 0, 20, 87, 223, 185, 18, 138, 128, 97, 20, 42, 208, 51, 86, 41, 255, 35, 255, 156},
|
||||
// feb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167
|
||||
{254, 179, 195, 55, 215, 165, 26, 111, 191, 0, 185, 227, 76, 82, 225, 201, 25, 92, 150, 155, 212, 231, 160, 191, 213, 29, 92, 91, 237, 156, 17, 103},
|
||||
// e71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7
|
||||
{231, 31, 10, 168, 60, 195, 46, 223, 190, 250, 159, 77, 62, 1, 116, 202, 133, 24, 46, 236, 159, 58, 9, 246, 166, 192, 223, 99, 119, 165, 16, 215},
|
||||
// 31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0
|
||||
{49, 32, 111, 168, 10, 80, 187, 106, 190, 41, 8, 80, 88, 241, 98, 18, 33, 42, 96, 238, 200, 240, 73, 254, 203, 146, 216, 200, 224, 168, 75, 192},
|
||||
// 21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544
|
||||
{33, 53, 43, 254, 203, 237, 221, 233, 147, 131, 159, 97, 76, 61, 172, 10, 62, 227, 117, 67, 249, 180, 18, 177, 97, 153, 220, 21, 142, 35, 181, 68},
|
||||
// 619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765
|
||||
{97, 158, 49, 39, 36, 187, 109, 124, 49, 83, 237, 157, 231, 145, 215, 100, 163, 102, 179, 137, 175, 19, 197, 139, 248, 168, 217, 4, 129, 164, 103, 101},
|
||||
// 7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4
|
||||
{124, 221, 41, 134, 38, 130, 80, 98, 141, 12, 16, 227, 133, 197, 140, 97, 145, 230, 251, 224, 81, 145, 188, 192, 79, 19, 63, 44, 234, 114, 193, 196},
|
||||
// 848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1
|
||||
{132, 137, 48, 189, 123, 168, 202, 197, 70, 97, 7, 33, 19, 251, 39, 136, 105, 224, 123, 184, 88, 127, 145, 57, 41, 51, 55, 77, 1, 123, 203, 225},
|
||||
// 8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636
|
||||
{136, 105, 255, 44, 34, 178, 140, 193, 5, 16, 217, 133, 50, 146, 128, 51, 40, 190, 79, 176, 232, 4, 149, 232, 187, 141, 39, 31, 91, 136, 150, 54},
|
||||
// b5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c
|
||||
{181, 254, 40, 231, 159, 27, 133, 15, 134, 88, 36, 108, 233, 182, 161, 231, 180, 159, 192, 109, 183, 20, 62, 143, 224, 180, 242, 176, 197, 82, 58, 92},
|
||||
// 985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7
|
||||
{152, 94, 146, 159, 112, 175, 40, 208, 189, 209, 169, 10, 128, 143, 151, 127, 89, 124, 124, 119, 140, 72, 158, 152, 211, 189, 137, 16, 211, 26, 192, 247},
|
||||
}
57
beacon-chain/cache/interfaces.go
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// DepositCache combines the interfaces for retrieving and inserting deposit information.
|
||||
type DepositCache interface {
|
||||
DepositFetcher
|
||||
DepositInserter
|
||||
}
|
||||
|
||||
// DepositFetcher defines a struct which can retrieve deposit information from a store.
|
||||
type DepositFetcher interface {
|
||||
AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
|
||||
AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer
|
||||
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
|
||||
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
|
||||
InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte)
|
||||
PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
|
||||
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
|
||||
PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
|
||||
PruneProofs(ctx context.Context, untilDepositIndex int64) error
|
||||
FinalizedFetcher
|
||||
}
|
||||
|
||||
// DepositInserter defines a struct which can insert deposit information from a store.
|
||||
type DepositInserter interface {
|
||||
InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error
|
||||
InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer)
|
||||
InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64, executionHash common.Hash, executionNumber uint64) error
|
||||
}
|
||||
|
||||
// FinalizedFetcher is a smaller interface defined to be the bare minimum to satisfy “Service”.
|
||||
// It extends the "DepositFetcher" interface with additional methods for fetching finalized deposits.
|
||||
type FinalizedFetcher interface {
|
||||
FinalizedDeposits(ctx context.Context) (FinalizedDeposits, error)
|
||||
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
|
||||
}
|
||||
|
||||
// FinalizedDeposits defines a method to access a merkle tree containing deposits and their indexes.
|
||||
type FinalizedDeposits interface {
|
||||
Deposits() MerkleTree
|
||||
MerkleTrieIndex() int64
|
||||
}
|
||||
|
||||
// MerkleTree defines methods for constructing and manipulating a merkle tree.
|
||||
type MerkleTree interface {
|
||||
HashTreeRoot() ([32]byte, error)
|
||||
NumOfItems() int
|
||||
Insert(item []byte, index int) error
|
||||
MerkleProof(index int) ([][]byte, error)
|
||||
}
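The methods DepositTree now exports line up exactly with the new MerkleTree interface, which is what lets the finalized-deposits container hand the tree out behind an interface. A compile-time assertion sketch (package name and import paths are assumed from this diff):

package cache_test

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
)

// Compile-time check: *depositsnapshot.DepositTree implements cache.MerkleTree
// (HashTreeRoot, NumOfItems, Insert, MerkleProof).
var _ cache.MerkleTree = (*depositsnapshot.DepositTree)(nil)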
|
||||
@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
|
||||
valIdx: 2,
|
||||
st: genState(1),
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 32eth",
|
||||
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
|
||||
valIdx: 2,
|
||||
activeBalance: 1,
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 1",
|
||||
|
||||
@@ -28,10 +28,10 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
}
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoAltair(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
return st
|
||||
}
|
||||
|
||||
@@ -69,6 +69,15 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no active validators, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, 0), // Regression test for divide by zero. Issue #13051.
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "no active validator indices",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -42,7 +42,6 @@ go_library(
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/slashings:go_default_library",
|
||||
@@ -100,7 +99,6 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
|
||||
|
||||
@@ -51,7 +51,11 @@ func ProcessVoluntaryExits(
|
||||
beaconState state.BeaconState,
|
||||
exits []*ethpb.SignedVoluntaryExit,
|
||||
) (state.BeaconState, error) {
|
||||
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
|
||||
// Avoid calculating the epoch churn if no exits exist.
|
||||
if len(exits) == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
|
||||
var exitEpoch primitives.Epoch
|
||||
for idx, exit := range exits {
|
||||
if exit == nil || exit.Exit == nil {
|
||||
|
||||
@@ -82,7 +82,7 @@ func ProcessRandaoNoVerify(
|
||||
for i, x := range blockRandaoReveal {
|
||||
latestMixSlice[i] ^= x
|
||||
}
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), latestMixSlice); err != nil {
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), [32]byte(latestMixSlice)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
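The only change here is passing the mix as [32]byte(latestMixSlice) instead of a plain slice. That relies on Go 1.20's slice-to-array conversion, which copies the first 32 bytes and panics if the slice is shorter, so the randao mix must always be exactly 32 bytes. A tiny sketch:

package main

import "fmt"

func main() {
	mix := make([]byte, 32)
	mix[0] = 0xaa
	arr := [32]byte(mix)        // Go 1.20+: copies the slice into a fixed-size array
	fmt.Printf("%x\n", arr[:2]) // aa00
	// [32]byte(mix[:16]) would panic: slice length shorter than array length.
}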
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
@@ -236,7 +235,7 @@ func BLSChangesSignatureBatch(
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
batch.PublicKeys[i] = publicKey
|
||||
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
|
||||
htr, err := signing.Data(change.Message.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
|
||||
}
|
||||
@@ -251,7 +250,7 @@ func BLSChangesSignatureBatch(
|
||||
// is from a previous fork.
|
||||
func VerifyBLSChangeSignature(
|
||||
st state.ReadOnlyBeaconState,
|
||||
change *ethpbv2.SignedBLSToExecutionChange,
|
||||
change *ethpb.SignedBLSToExecutionChange,
|
||||
) error {
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -152,7 +151,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
})
|
||||
|
||||
t.Run("signature does not verify", func(t *testing.T) {
|
||||
@@ -1209,8 +1208,7 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
@@ -1274,8 +1272,7 @@ func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
require.Equal(t, false, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
@@ -1362,7 +1359,6 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
params.OverrideBeaconConfig(savedConfig)
|
||||
}
|
||||
|
||||
@@ -112,7 +112,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
if isActive && belowEjectionBalance {
|
||||
// It is fine to do a quadratic loop here since this
// should rarely happen.
|
||||
maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(state)
|
||||
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
|
||||
@@ -137,9 +137,10 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
return nil, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
|
||||
churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
churnLimit := helpers.ValidatorActivationChurnLimit(activeValidatorCount)
|
||||
|
||||
if state.Version() >= version.Deneb {
|
||||
churnLimit = helpers.ValidatorActivationChurnLimitDeneb(activeValidatorCount)
|
||||
}
|
||||
|
||||
// Prevent the churn limit from causing an index out of bounds.
|
||||
@@ -348,7 +349,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), mix); err != nil {
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), [32]byte(mix)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -311,8 +311,7 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
limit, err := helpers.ValidatorChurnLimit(0)
|
||||
require.NoError(t, err)
|
||||
limit := helpers.ValidatorActivationChurnLimit(0)
|
||||
for i := uint64(0); i < limit+10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -338,6 +337,42 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
base := ðpb.BeaconStateDeneb{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.MinPerEpochChurnLimit = 10
|
||||
cfg.ChurnLimitQuotient = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
})
|
||||
}
|
||||
beaconState, err := state_native.InitializeFromProtoDeneb(base)
|
||||
require.NoError(t, err)
|
||||
currentEpoch := time.CurrentEpoch(beaconState)
|
||||
newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
for i, validator := range newState.Validators() {
|
||||
assert.Equal(t, currentEpoch+1, validator.ActivationEligibilityEpoch, "Could not update registry %d, unexpected activation eligibility epoch", i)
|
||||
// Note: In Deneb, only validator indices before `MaxPerEpochActivationChurnLimit` should be activated.
|
||||
if uint64(i) < params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
|
||||
t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
|
||||
i, helpers.ActivationExitEpoch(currentEpoch), validator.ActivationEpoch)
|
||||
}
|
||||
if uint64(i) >= params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
t.Errorf("Could not update registry %d, validators should not have been activated, wanted activation epoch: %d, got %d",
|
||||
i, params.BeaconConfig().FarFutureEpoch, validator.ActivationEpoch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_ActivationCompletes(t *testing.T) {
|
||||
base := ðpb.BeaconState{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
|
||||
@@ -22,6 +22,9 @@ const (
|
||||
|
||||
// BLSToExecutionChangeReceived is sent after a BLS to execution change object has been received from gossip or rpc.
|
||||
BLSToExecutionChangeReceived
|
||||
|
||||
// BlobSidecarReceived is sent after a blob sidecar is received from gossip or rpc.
|
||||
BlobSidecarReceived = 6
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -52,3 +55,8 @@ type SyncCommitteeContributionReceivedData struct {
|
||||
type BLSToExecutionChangeReceivedData struct {
|
||||
Change *ethpb.SignedBLSToExecutionChange
|
||||
}
|
||||
|
||||
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
|
||||
type BlobSidecarReceivedData struct {
|
||||
Blob *ethpb.SignedBlobSidecar
|
||||
}
|
||||
|
||||
@@ -99,7 +99,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "index 12390192 out of range", err)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "index 120391029 out of range", err)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "index 129301923 out of range", err)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
@@ -367,7 +367,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "index 21093019 out of range", err)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
|
||||
@@ -136,6 +136,10 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(indices) == 0 {
|
||||
return nil, errors.New("no active validator indices")
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
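ActiveValidatorIndices now fails fast when no validators are active, which is the regression fix for the divide-by-zero noted in the test comment (issue #13051): downstream selection code takes a modulo by the number of active indices. A small illustration with a hypothetical helper:

package main

import (
	"errors"
	"fmt"
)

// pickIndex is a stand-in for downstream committee/proposer selection: it
// indexes into the active set using a modulo, which panics when the set is
// empty, hence the "no active validator indices" guard added above.
func pickIndex(activeIndices []uint64, seed uint64) (uint64, error) {
	if len(activeIndices) == 0 {
		return 0, errors.New("no active validator indices")
	}
	return activeIndices[seed%uint64(len(activeIndices))], nil
}

func main() {
	_, err := pickIndex(nil, 42)
	fmt.Println(err) // no active validator indices
}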
|
||||
@@ -206,10 +210,7 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
return epoch + 1 + params.BeaconConfig().MaxSeedLookahead
|
||||
}
|
||||
|
||||
// ValidatorChurnLimit returns the number of validators that are allowed to
|
||||
// enter and exit validator pool for an epoch.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// calculateChurnLimit computes the churn limit based on the formula in the spec.
|
||||
//
|
||||
// def get_validator_churn_limit(state: BeaconState) -> uint64:
|
||||
// """
|
||||
@@ -217,12 +218,32 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
// """
|
||||
// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||
// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
|
||||
func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
|
||||
func calculateChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
churnLimit := activeValidatorCount / params.BeaconConfig().ChurnLimitQuotient
|
||||
if churnLimit < params.BeaconConfig().MinPerEpochChurnLimit {
|
||||
churnLimit = params.BeaconConfig().MinPerEpochChurnLimit
|
||||
return params.BeaconConfig().MinPerEpochChurnLimit
|
||||
}
|
||||
return churnLimit, nil
|
||||
return churnLimit
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimit returns the maximum number of validators that can be activated in a slot.
|
||||
func ValidatorActivationChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorExitChurnLimit returns the maximum number of validators that can be exited in a slot.
|
||||
func ValidatorExitChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimitDeneb returns the maximum number of validators that can be activated in a slot post Deneb.
|
||||
func ValidatorActivationChurnLimitDeneb(activeValidatorCount uint64) uint64 {
|
||||
limit := calculateChurnLimit(activeValidatorCount)
|
||||
// New in Deneb.
|
||||
if limit > params.BeaconConfig().MaxPerEpochActivationChurnLimit {
|
||||
return params.BeaconConfig().MaxPerEpochActivationChurnLimit
|
||||
}
|
||||
return limit
|
||||
}
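The refactor folds both directions of churn into one calculateChurnLimit and adds the Deneb activation cap. A standalone sketch of the arithmetic, using mainnet-style constants that are assumptions here rather than values read from params.BeaconConfig():

package main

import "fmt"

const (
	minPerEpochChurnLimit           = 4     // assumed MinPerEpochChurnLimit
	churnLimitQuotient              = 65536 // assumed ChurnLimitQuotient
	maxPerEpochActivationChurnLimit = 8     // assumed Deneb activation cap
)

func churnLimit(active uint64) uint64 {
	limit := active / churnLimitQuotient
	if limit < minPerEpochChurnLimit {
		return minPerEpochChurnLimit
	}
	return limit
}

func activationChurnLimitDeneb(active uint64) uint64 {
	if limit := churnLimit(active); limit <= maxPerEpochActivationChurnLimit {
		return limit
	}
	return maxPerEpochActivationChurnLimit
}

func main() {
	for _, n := range []uint64{1_000, 100_000, 1_000_000} {
		fmt.Println(n, churnLimit(n), activationChurnLimitDeneb(n))
	}
	// 1000 4 4; 100000 4 4; 1000000 15 8
}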
|
||||
|
||||
// BeaconProposerIndex returns proposer index of a current slot.
|
||||
|
||||
@@ -367,9 +367,50 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
resultChurn, err := ValidatorChurnLimit(validatorCount)
|
||||
resultChurn := ValidatorActivationChurnLimit(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
tests := []struct {
|
||||
validatorCount int
|
||||
wantedChurn uint64
|
||||
}{
|
||||
{1000, 4},
|
||||
{100000, 4},
|
||||
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
// Create validators
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize beacon state
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorChurnLimit(%d)", test.validatorCount)
|
||||
|
||||
// Get active validator count
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test churn limit calculation
|
||||
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -519,12 +560,24 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
},
|
||||
want: []primitives.ValidatorIndex{0, 2, 3},
|
||||
},
|
||||
{
|
||||
name: "impossible_zero_validators", // Regression test for issue #13051
|
||||
args: args{
|
||||
state: ðpb.BeaconState{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
Validators: make([]*ethpb.Validator, 0),
|
||||
},
|
||||
epoch: 10,
|
||||
},
|
||||
wantedErr: "no active validator indices",
|
||||
},
|
||||
}
|
||||
defer ClearCache()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators(tt.args.state.Validators))
|
||||
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
|
||||
@@ -80,10 +80,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
|
||||
T := cfg.MaxEffectiveBalance / cfg.GweiPerEth
|
||||
|
||||
// Validator churn limit.
|
||||
delta, err := ValidatorChurnLimit(N)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot obtain active validator churn limit: %w", err)
|
||||
}
|
||||
delta := ValidatorExitChurnLimit(N)
|
||||
|
||||
// Balance top-ups.
|
||||
Delta := uint64(cfg.SlotsPerEpoch.Mul(cfg.MaxDeposits))
|
||||
|
||||
@@ -92,12 +92,12 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
|
||||
// domain=domain,
|
||||
// ))
|
||||
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
|
||||
return SigningData(object.HashTreeRoot, domain)
|
||||
return Data(object.HashTreeRoot, domain)
|
||||
}
|
||||
|
||||
// SigningData computes the signing data by utilising the provided root function and then
|
||||
// Data computes the signing data by utilising the provided root function and then
|
||||
// returning the signing data of the container object.
|
||||
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
func Data(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
objRoot, err := rootFunc()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
@@ -152,7 +152,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
root, err := SigningData(blkHdr.HashTreeRoot, domain)
|
||||
root, err := Data(blkHdr.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
@@ -191,7 +191,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
// utilize custom block hashing function
|
||||
root, err := SigningData(rootFunc, domain)
|
||||
root, err := Data(rootFunc, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute signing root")
|
||||
}
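
The signing-root helpers renamed in the hunks above all follow the same shape: obtain the object's root from a caller-supplied root function, then bind it to a 32-byte domain. A toy, self-contained sketch of that call shape is below; sha256 over the concatenation stands in for the real SSZ hash-tree-root of a SigningData container, so treat it as an illustration only:

package main

import (
	"crypto/sha256"
	"fmt"
)

// signingRoot mirrors the shape of the Data helper: it defers to a root function
// for the object root and then mixes in the domain. The real implementation
// hash-tree-roots an SSZ container; sha256 here is only a stand-in.
func signingRoot(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
	objRoot, err := rootFunc()
	if err != nil {
		return [32]byte{}, err
	}
	return sha256.Sum256(append(objRoot[:], domain...)), nil
}

func main() {
	rootFunc := func() ([32]byte, error) { return sha256.Sum256([]byte("object")), nil }
	domain := make([]byte, 32) // in practice the domain comes from a ComputeDomain-style helper
	root, err := signingRoot(rootFunc, domain)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", root)
}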
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -221,6 +222,18 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
func EmptyGenesisState() (state.BeaconState, error) {
|
||||
blockRoots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, fieldparams.StateRootsLength)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
mixes := make([][]byte, fieldparams.RandaoMixesLength)
|
||||
for i := range mixes {
|
||||
mixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
st := ðpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
@@ -229,6 +242,9 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: mixes,
|
||||
// Validator registry fields.
|
||||
Validators: []*ethpb.Validator{},
|
||||
Balances: []uint64{},
|
||||
|
||||
@@ -103,8 +103,8 @@ func TestGenesisState_HashEquality(t *testing.T) {
|
||||
pbstate, err := state_native.ProtobufBeaconStatePhase0(state.ToProto())
|
||||
require.NoError(t, err)
|
||||
|
||||
root1, err1 := hash.HashProto(pbState1)
|
||||
root2, err2 := hash.HashProto(pbstate)
|
||||
root1, err1 := hash.Proto(pbState1)
|
||||
root2, err2 := hash.Proto(pbstate)
|
||||
|
||||
if err1 != nil || err2 != nil {
|
||||
t.Fatalf("Failed to marshal state to bytes: %v %v", err1, err2)
|
||||
|
||||
@@ -382,10 +382,18 @@ func TestProcessEpochPrecompute_CanProcess(t *testing.T) {
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
|
||||
newState, err := transition.ProcessEpochPrecompute(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
// an already exited validator
|
||||
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
|
||||
|
||||
// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// epoch and the number of them
|
||||
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
e := val.ExitEpoch()
|
||||
@@ -82,10 +82,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
currentChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
|
||||
if churn >= currentChurn {
|
||||
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
|
||||
@@ -137,7 +134,7 @@ func SlashValidator(
|
||||
slashedIdx primitives.ValidatorIndex,
|
||||
penaltyQuotient uint64,
|
||||
proposerRewardQuotient uint64) (state.BeaconState, error) {
|
||||
maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
|
||||
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
|
||||
@@ -235,10 +232,7 @@ func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validato
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
@@ -276,10 +270,7 @@ func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
|
||||
@@ -410,7 +410,7 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
epoch, churn := MaxExitEpochAndChurn(s)
|
||||
require.Equal(t, tt.wantedEpoch, epoch)
|
||||
require.Equal(t, tt.wantedChurn, churn)
|
||||
}
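
The exit and ejection hunks above all apply the same scheduling rule: exits fill the latest pending exit epoch until its churn limit is reached, then spill into the next epoch. A toy sketch of just that rule (the numbers are illustrative):

package main

import "fmt"

// scheduleExit returns the epoch a new exit should land in, given the latest
// pending exit epoch, how many exits are already queued for it, and the
// per-epoch churn limit.
func scheduleExit(maxExitEpoch, churnAtMax, churnLimit uint64) uint64 {
	if churnAtMax >= churnLimit {
		return maxExitEpoch + 1 // the epoch is full, spill into the next one
	}
	return maxExitEpoch
}

func main() {
	fmt.Println(scheduleExit(100, 3, 4)) // 100: still room in the current exit epoch
	fmt.Println(scheduleExit(100, 4, 4)) // 101: churn limit reached, queue advances
}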
|
||||
|
||||
beacon-chain/das/BUILD.bazel (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"blocking.go",
|
||||
"cache.go",
|
||||
"error.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_test.go",
|
||||
"cache_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/das/availability.go (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type kzgBatch func([][]byte, []*ethpb.BlobSidecar) error
|
||||
|
||||
type LazilyPersistentStore struct {
|
||||
db BlobsDB
|
||||
cache *cache
|
||||
verifyKZG kzgBatch
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStore{}
|
||||
|
||||
func NewLazilyPersistentStore(db BlobsDB) *LazilyPersistentStore {
|
||||
return &LazilyPersistentStore{
|
||||
db: db,
|
||||
cache: newCache(),
|
||||
verifyKZG: kzg.IsDataAvailable,
|
||||
}
|
||||
}
|
||||
|
||||
// Persist adds blobs to the working blob cache (in-memory or disk backed is an implementation
|
||||
// detail). Blobs stored in this cache will be persisted for at least as long as the node is
|
||||
// running. Once IsDataAvailable succeeds, all blobs referenced by the given block are guaranteed
|
||||
// to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStore) Persist(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(sc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var key cacheKey
|
||||
var entry *cacheEntry
|
||||
persisted := make([]*ethpb.BlobSidecar, 0, len(sc))
|
||||
for i := range sc {
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[i].Slot), slots.ToEpoch(current)) {
|
||||
continue
|
||||
}
|
||||
if sc[i].Index >= fieldparams.MaxBlobsPerBlock {
|
||||
log.WithField("block_root", sc[i].BlockRoot).WithField("index", sc[i].Index).Error("discarding BlobSidecar with index >= MaxBlobsPerBlock")
|
||||
continue
|
||||
}
|
||||
skey := keyFromSidecar(sc[i])
|
||||
if key != skey {
|
||||
key = skey
|
||||
entry = s.cache.ensure(key)
|
||||
}
|
||||
if entry.stash(sc[i]) {
|
||||
persisted = append(persisted, sc[i])
|
||||
}
|
||||
}
|
||||
return persisted, nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// - BlobSidecars already in the db are assumed to have been previously verified against the block.
|
||||
// - BlobSidecars waiting for verification in the cache will be persisted to the db after verification.
|
||||
// - When BlobSidecars are written to the db, their cache entries are cleared.
|
||||
// - BlobSidecar cache entries with commitments that do not match the block will be evicted.
|
||||
// - BlobSidecar cache entries with commitments that fail proof verification will be evicted.
|
||||
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments := commitmentsToCheck(b, current)
|
||||
if len(blockCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := keyFromBlock(b)
|
||||
entry := s.cache.ensure(key)
|
||||
// holding the lock over the course of the DA check simplifies everything
|
||||
entry.Lock()
|
||||
defer entry.Unlock()
|
||||
if err := s.daCheck(ctx, b.Root(), blockCommitments, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
// If there is no error, DA has been successful, so we can clean up the cache.
|
||||
s.cache.delete(key)
|
||||
return nil
|
||||
}
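
A hedged sketch of how a caller is expected to drive this pair of methods: stash sidecars as they arrive, then gate block import on the DA check for the same block. The types below are local stand-ins with hypothetical names, not the real das package types:

package daexample

import (
	"context"
	"fmt"
)

type sidecar struct{ Index uint64 }

type roBlock struct{ slot uint64 }

// availabilityStore mirrors the two methods shown above.
type availabilityStore interface {
	Persist(ctx context.Context, current uint64, sc ...*sidecar) ([]*sidecar, error)
	IsDataAvailable(ctx context.Context, current uint64, b roBlock) error
}

// importBlock shows the intended ordering: persist first, then check availability.
func importBlock(ctx context.Context, store availabilityStore, b roBlock, scs []*sidecar) error {
	if _, err := store.Persist(ctx, b.slot, scs...); err != nil {
		return fmt.Errorf("persist sidecars: %w", err)
	}
	if err := store.IsDataAvailable(ctx, b.slot, b); err != nil {
		return fmt.Errorf("data not available: %w", err)
	}
	return nil
}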
|
||||
|
||||
func (s *LazilyPersistentStore) daCheck(ctx context.Context, root [32]byte, blockCommitments [][]byte, entry *cacheEntry) error {
|
||||
sidecars, cacheErr := entry.filter(root, blockCommitments)
|
||||
if cacheErr == nil {
|
||||
if err := s.verifyKZG(blockCommitments, sidecars); err != nil {
|
||||
s.cache.delete(keyFromSidecar(sidecars[0]))
|
||||
return err
|
||||
}
|
||||
// We have all the committed sidecars in cache, and they all have valid proofs.
|
||||
// If flushing them to backing storage succeeds, then we can confirm DA.
|
||||
return s.db.SaveBlobSidecar(ctx, sidecars)
|
||||
}
|
||||
|
||||
// Before returning the cache error, check if we have the data in the db.
|
||||
dbidx, err := s.persisted(ctx, root, entry)
|
||||
// persisted() accounts for db.ErrNotFound, so this is a real database error.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
notInDb, err := dbidx.missing(blockCommitments)
|
||||
// This is a database integrity sanity check - it should never fail.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// All commitments were found in the db, due to a previous successful DA check.
|
||||
if len(notInDb) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cacheErr
|
||||
}
|
||||
|
||||
// persisted populates the db cache, which contains a mapping from Index->KzgCommitment for BlobSidecars previously verified
|
||||
// (proof verification) and saved to the backend.
|
||||
func (s *LazilyPersistentStore) persisted(ctx context.Context, root [32]byte, entry *cacheEntry) (dbidx, error) {
|
||||
if entry.dbidxInitialized() {
|
||||
return entry.dbidx(), nil
|
||||
}
|
||||
sidecars, err := s.db.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNotFound) {
|
||||
// No BlobSidecars, initialize with empty idx.
|
||||
return entry.ensureDbidx(), nil
|
||||
}
|
||||
return entry.dbidx(), err
|
||||
}
|
||||
// Ensure all sidecars found in the db are represented in the cache and return the cache value.
|
||||
return entry.ensureDbidx(sidecars...), nil
|
||||
}
|
||||
|
||||
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) [][]byte {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return kzgCommitments
|
||||
}
|
||||
beacon-chain/das/availability_test.go (new file, 318 lines)
@@ -0,0 +1,318 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func Test_commitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "pre deneb",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
bb := util.NewBeaconBlockBellatrix()
|
||||
sb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "commitments within da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockDeneb()
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
d.Block.Slot = 100
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
commits: commits,
|
||||
slot: 100,
|
||||
},
|
||||
{
|
||||
name: "commitments outside da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockDeneb()
|
||||
// block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
b := c.block(t)
|
||||
co := commitmentsToCheck(b, c.slot)
|
||||
require.Equal(t, len(c.commits), len(co))
|
||||
for i := 0; i < len(c.commits); i++ {
|
||||
require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockDA struct {
|
||||
t *testing.T
|
||||
cmts [][]byte
|
||||
scs []*ethpb.BlobSidecar
|
||||
err error
|
||||
}
|
||||
|
||||
func (m *mockDA) expectedArguments(gotC [][]byte, gotSC []*ethpb.BlobSidecar) error {
|
||||
require.Equal(m.t, len(m.cmts), len(gotC))
|
||||
require.Equal(m.t, len(gotSC), len(m.scs))
|
||||
for i := range m.cmts {
|
||||
require.Equal(m.t, true, bytes.Equal(m.cmts[i], gotC[i]))
|
||||
}
|
||||
for i := range m.scs {
|
||||
require.Equal(m.t, m.scs[i], gotSC[i])
|
||||
}
|
||||
return m.err
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := &mockBlobsDB{}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
expectedCommitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
cmts: expectedCommitments,
|
||||
scs: scs,
|
||||
}
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
_, err = as.Persist(ctx, 1, scs[2])
|
||||
require.NoError(t, err)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
missingErr := MissingIndicesError{}
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{0, 1}, missingErr.Missing())
|
||||
|
||||
// All but one persisted, return missing idx
|
||||
_, err = as.Persist(ctx, 1, scs[0])
|
||||
require.NoError(t, err)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{1}, missingErr.Missing())
|
||||
|
||||
// All persisted, return nil
|
||||
_, err = as.Persist(ctx, 1, scs[1])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := &mockBlobsDB{}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
err: errors.New("kzg check should not run"),
|
||||
}
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
_, err := as.Persist(ctx, 1, scs[0])
|
||||
require.NoError(t, err)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
mismatchErr := CommitmentMismatchError{}
|
||||
require.Equal(t, true, errors.As(err, &mismatchErr))
|
||||
require.DeepEqual(t, []uint64{0}, mismatchErr.Mismatch())
|
||||
|
||||
// the next time we call the DA check, the mismatched commitment should be evicted
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
missingErr := MissingIndicesError{}
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{0, 1, 2}, missingErr.Missing())
|
||||
}
|
||||
|
||||
func TestPersisted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
db := &mockBlobsDB{
|
||||
BlobSidecarsByRootCallback: func(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
return scs, nil
|
||||
},
|
||||
}
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
err: errors.New("kzg check should not run"),
|
||||
}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
entry := &cacheEntry{}
|
||||
dbidx, err := as.persisted(ctx, blk.Root(), entry)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, dbidx[0] == nil)
|
||||
for i := range scs {
|
||||
require.Equal(t, *dbidx[i], bytesutil.ToBytes48(scs[i].KzgCommitment))
|
||||
}
|
||||
|
||||
expectedCommitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
missing, err := dbidx.missing(expectedCommitments)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(missing))
|
||||
|
||||
// test that the caching is working by returning the wrong set of sidecars
|
||||
// and making sure that dbidx still thinks none are missing
|
||||
db = &mockBlobsDB{
|
||||
BlobSidecarsByRootCallback: func(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
return scs[1:], nil
|
||||
},
|
||||
}
|
||||
as = NewLazilyPersistentStore(db)
|
||||
// note, using the same entry value
|
||||
dbidx, err = as.persisted(ctx, blk.Root(), entry)
|
||||
require.NoError(t, err)
|
||||
// same assertions should pass as when all sidecars returned by db
|
||||
missing, err = dbidx.missing(expectedCommitments)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(missing))
|
||||
|
||||
// do it again, but with a fresh cache entry - we should see a missing sidecar
|
||||
newEntry := &cacheEntry{}
|
||||
dbidx, err = as.persisted(ctx, blk.Root(), newEntry)
|
||||
require.NoError(t, err)
|
||||
missing, err = dbidx.missing(expectedCommitments)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(missing))
|
||||
// only element in missing should be the zero index
|
||||
require.Equal(t, uint64(0), missing[0])
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_DBFallback(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
// Generate another sidecar at index 0 for the same block so we can mess with its commitment
|
||||
_, scscp := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1)
|
||||
db := &mockBlobsDB{
|
||||
BlobSidecarsByRootCallback: func(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
return scs, nil
|
||||
},
|
||||
}
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
err: errors.New("kzg check should not run"),
|
||||
}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
|
||||
// Set up the mismatched commit, but we don't expect this to error because
|
||||
// the db contains the sidecars.
|
||||
scscp[0].BlockRoot = scs[0].BlockRoot
|
||||
scscp[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
_, err := as.Persist(ctx, 1, scscp[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
// This should pass since the db is giving us all the right sidecars
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
|
||||
// now using an empty db, we should fail
|
||||
as.db = &mockBlobsDB{}
|
||||
|
||||
// but we should have pruned, so we'll get a missing error, not mismatch
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
missingErr := MissingIndicesError{}
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{0, 1, 2}, missingErr.Missing())
|
||||
|
||||
// put the bad value back in the cache
|
||||
persisted, err := as.Persist(ctx, 1, scscp[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(persisted))
|
||||
// now we'll get a mismatch error
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
mismatchErr := CommitmentMismatchError{}
|
||||
require.Equal(t, true, errors.As(err, &mismatchErr))
|
||||
require.DeepEqual(t, []uint64{0}, mismatchErr.Mismatch())
|
||||
}
|
||||
|
||||
func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
as := NewLazilyPersistentStore(&mockBlobsDB{})
|
||||
// stashes as expected
|
||||
p, err := as.Persist(ctx, 1, scs...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(p))
|
||||
// ignores duplicates
|
||||
p, err = as.Persist(ctx, 1, scs...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
|
||||
// ignores index out of bound
|
||||
scs[0].Index = 6
|
||||
p, err = as.Persist(ctx, 1, scs[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
|
||||
_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
// ignores sidecars before the retention period
|
||||
slotOOB, err := slots.EpochStart(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
p, err = as.Persist(ctx, 32+slotOOB, more[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
|
||||
// doesn't ignore new sidecars with a different block root
|
||||
p, err = as.Persist(ctx, 1, more...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(p))
|
||||
}
|
||||
beacon-chain/das/blocking.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// AsyncStore wraps an AvailabilityStore and blocks until IsDataAvailable is ready.
// If the context given to IsDataAvailable is cancelled, the result of IsDataAvailable will be ctx.Err().
|
||||
type AsyncStore struct {
|
||||
notif *idxNotifiers
|
||||
s AvailabilityStore
|
||||
}
|
||||
|
||||
func (bs *AsyncStore) PersistOnceCommitted(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(sc) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
seen := bs.notif.ensure(keyFromSidecar(sc[0]))
|
||||
persisted, err := bs.s.Persist(ctx, current, sc...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range persisted {
|
||||
seen <- persisted[i].Index
|
||||
}
|
||||
return persisted, nil
|
||||
}
|
||||
|
||||
func (bs *AsyncStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
key := keyFromBlock(b)
|
||||
for {
|
||||
err := bs.s.IsDataAvailable(ctx, current, b)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
mie := MissingIndicesError{}
|
||||
if !errors.As(err, &mie) {
|
||||
return err
|
||||
}
|
||||
waitFor := make(map[uint64]struct{})
|
||||
for _, m := range mie.Missing() {
|
||||
waitFor[m] = struct{}{}
|
||||
}
|
||||
if err := waitForIndices(ctx, bs.notif.ensure(key), waitFor); err != nil {
|
||||
return err
|
||||
}
|
||||
bs.notif.reset(key)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForIndices(ctx context.Context, idxSeen chan uint64, waitFor map[uint64]struct{}) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case idx := <-idxSeen:
|
||||
delete(waitFor, idx)
|
||||
if len(waitFor) == 0 {
|
||||
return nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
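
The blocking behaviour above boils down to one pattern: drain a notification channel until every expected index has been seen or the context ends. A self-contained usage sketch of that pattern, with a simulated producer standing in for gossip arrivals:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitFor drains idxSeen until every index in want has arrived, mirroring the
// waitForIndices helper above (channel sizing and names here are illustrative).
func waitFor(ctx context.Context, idxSeen <-chan uint64, want map[uint64]struct{}) error {
	for len(want) > 0 {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case idx := <-idxSeen:
			delete(want, idx)
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	seen := make(chan uint64, 6)
	go func() { // simulated sidecar arrivals, out of order
		for _, i := range []uint64{2, 0, 1} {
			seen <- i
		}
	}()
	err := waitFor(ctx, seen, map[uint64]struct{}{0: {}, 1: {}, 2: {}})
	fmt.Println(err) // <nil>
}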
|
||||
|
||||
type idxNotifiers struct {
|
||||
sync.RWMutex
|
||||
entries map[cacheKey]chan uint64
|
||||
}
|
||||
|
||||
func (c *idxNotifiers) ensure(key cacheKey) chan uint64 {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
e, ok := c.entries[key]
|
||||
if !ok {
|
||||
e = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c.entries[key] = e
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (c *idxNotifiers) reset(key cacheKey) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
delete(c.entries, key)
|
||||
}
|
||||
beacon-chain/das/cache.go (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
errDBCommitmentMismatch = errors.New("blob/block commitment mismatch")
|
||||
)
|
||||
|
||||
// cacheKey includes the slot so that we can easily iterate through the cache and compare
|
||||
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
|
||||
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
|
||||
type cacheKey struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
}
|
||||
|
||||
type cache struct {
|
||||
sync.RWMutex
|
||||
entries map[cacheKey]*cacheEntry
|
||||
}
|
||||
|
||||
func newCache() *cache {
|
||||
return &cache{entries: make(map[cacheKey]*cacheEntry)}
|
||||
}
|
||||
|
||||
// keyFromSidecar is a convenience method for constructing a cacheKey from a BlobSidecar value.
|
||||
func keyFromSidecar(sc *ethpb.BlobSidecar) cacheKey {
|
||||
return cacheKey{slot: sc.Slot, root: bytesutil.ToBytes32(sc.BlockRoot)}
|
||||
}
|
||||
|
||||
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
|
||||
func keyFromBlock(b blocks.ROBlock) cacheKey {
|
||||
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
|
||||
}
|
||||
|
||||
// ensure returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *cache) ensure(key cacheKey) *cacheEntry {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
e, ok := c.entries[key]
|
||||
if !ok {
|
||||
e = &cacheEntry{}
|
||||
c.entries[key] = e
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// delete removes the cache entry from the cache.
|
||||
func (c *cache) delete(key cacheKey) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
delete(c.entries, key)
|
||||
}
|
||||
|
||||
// dbidx is a compact representation of the set of BlobSidecars in the database for a given root,
|
||||
// organized as a map from BlobSidecar.Index->BlobSidecar.KzgCommitment.
|
||||
// This representation is convenient for comparison to a block's commitments.
|
||||
type dbidx [fieldparams.MaxBlobsPerBlock]*[48]byte
|
||||
|
||||
// missing compares the set of BlobSidecars observed in the backing store to the set of commitments
|
||||
// observed in a block - cmts is the BlobKzgCommitments field from a block.
|
||||
func (idx dbidx) missing(cmts [][]byte) ([]uint64, error) {
|
||||
m := make([]uint64, 0, len(cmts))
|
||||
for i := range cmts {
|
||||
if idx[i] == nil {
|
||||
m = append(m, uint64(i))
|
||||
continue
|
||||
}
|
||||
c := *idx[i]
|
||||
if c != bytesutil.ToBytes48(cmts[i]) {
|
||||
return nil, errors.Wrapf(errDBCommitmentMismatch,
|
||||
"index=%d, db=%#x, block=%#x", i, c, cmts[i])
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// cacheEntry represents 2 different types of caches for a given block.
|
||||
// scs is a fixed-length cache of BlobSidecars.
|
||||
// dbx is a compact representation of BlobSidecars observed in the backing store.
|
||||
// dbx assumes that all writes to the backing store go through the same cache.
|
||||
type cacheEntry struct {
|
||||
sync.RWMutex
|
||||
scs [fieldparams.MaxBlobsPerBlock]*ethpb.BlobSidecar
|
||||
dbx dbidx
|
||||
dbRead bool
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of BlobSidecars.
|
||||
// Only the first BlobSidecar of a given Index will be kept in the cache.
|
||||
// The return value represents whether the given BlobSidecar was stashed.
|
||||
// A false value means there was already a BlobSidecar with the given Index.
|
||||
func (e *cacheEntry) stash(sc *ethpb.BlobSidecar) bool {
|
||||
if e.scs[sc.Index] == nil {
|
||||
e.scs[sc.Index] = sc
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *cacheEntry) dbidx() dbidx {
|
||||
return e.dbx
|
||||
}
|
||||
|
||||
func (e *cacheEntry) dbidxInitialized() bool {
|
||||
return e.dbRead
|
||||
}
|
||||
|
||||
// filter evicts sidecars that are not committed to by the block and returns custom
|
||||
// errors if the cache is missing any of the commitments, or if the commitments in
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
// commitments were found in the cache and the sidecar slice return value can be used
|
||||
// to perform a DA check against the cached sidecars.
|
||||
func (e *cacheEntry) filter(root [32]byte, blkCmts [][]byte) ([]*ethpb.BlobSidecar, error) {
|
||||
// Evict any blobs that are out of range.
|
||||
for i := len(blkCmts); i < fieldparams.MaxBlobsPerBlock; i++ {
|
||||
if e.scs[i] == nil {
|
||||
continue
|
||||
}
|
||||
log.WithField("block_root", root).
|
||||
WithField("index", i).
|
||||
WithField("cached_commitment", fmt.Sprintf("%#x", e.scs[i].KzgCommitment)).
|
||||
Warn("Evicting BlobSidecar with index > maximum blob commitment")
|
||||
e.scs[i] = nil
|
||||
}
|
||||
// Generate a MissingIndicesError for any missing indices.
|
||||
// Generate a CommitmentMismatchError for any mismatched commitments.
|
||||
missing := make([]uint64, 0, len(blkCmts))
|
||||
mismatch := make([]uint64, 0, len(blkCmts))
|
||||
for i := range blkCmts {
|
||||
if e.scs[i] == nil {
|
||||
missing = append(missing, uint64(i))
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(blkCmts[i], e.scs[i].KzgCommitment) {
|
||||
mismatch = append(mismatch, uint64(i))
|
||||
log.WithField("block_root", root).
|
||||
WithField("index", i).
|
||||
WithField("expected_commitment", fmt.Sprintf("%#x", blkCmts[i])).
|
||||
WithField("cached_commitment", fmt.Sprintf("%#x", e.scs[i].KzgCommitment)).
|
||||
Error("Evicting BlobSidecar with incorrect commitment")
|
||||
e.scs[i] = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
if len(mismatch) > 0 {
|
||||
return nil, NewCommitmentMismatchError(mismatch)
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
return nil, NewMissingIndicesError(missing)
|
||||
}
|
||||
return e.scs[0:len(blkCmts)], nil
|
||||
}
|
||||
|
||||
// ensureDbidx updates the db cache representation to include the given BlobSidecars.
|
||||
func (e *cacheEntry) ensureDbidx(scs ...*ethpb.BlobSidecar) dbidx {
|
||||
if !e.dbRead {
|
||||
e.dbRead = true
|
||||
}
|
||||
for i := range scs {
|
||||
if scs[i].Index >= fieldparams.MaxBlobsPerBlock {
|
||||
continue
|
||||
}
|
||||
// Don't overwrite.
|
||||
if e.dbx[scs[i].Index] != nil {
|
||||
continue
|
||||
}
|
||||
c := bytesutil.ToBytes48(scs[i].KzgCommitment)
|
||||
e.dbx[scs[i].Index] = &c
|
||||
}
|
||||
return e.dbx
|
||||
}
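
To make the dbidx comparison concrete, here is a tiny, self-contained version of the missing-index check over a fixed-size array of optional commitments. The 48-byte size matches KZG commitments; everything else (names, the 6-blob bound) is illustrative:

package main

import (
	"bytes"
	"fmt"
)

const maxBlobs = 6

// missing reports which block commitments have no matching entry in the db index,
// and flags a mismatch if an entry exists but disagrees with the block.
func missing(idx [maxBlobs]*[48]byte, blockCmts [][]byte) ([]int, error) {
	var out []int
	for i, c := range blockCmts { // blockCmts is assumed to hold at most maxBlobs entries
		if idx[i] == nil {
			out = append(out, i)
			continue
		}
		if !bytes.Equal(idx[i][:], c) {
			return nil, fmt.Errorf("db/block commitment mismatch at index %d", i)
		}
	}
	return out, nil
}

func main() {
	var idx [maxBlobs]*[48]byte
	var c0 [48]byte
	copy(c0[:], "commitment-0")
	idx[0] = &c0                                   // index 0 already persisted
	blockCmts := [][]byte{c0[:], make([]byte, 48)} // block commits to two blobs
	m, err := missing(idx, blockCmts)
	fmt.Println(m, err) // [1] <nil>: index 1 still needs a sidecar
}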
|
||||
beacon-chain/das/cache_test.go (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestCacheEnsureDelete(t *testing.T) {
|
||||
c := newCache()
|
||||
require.Equal(t, 0, len(c.entries))
|
||||
root := bytesutil.ToBytes32([]byte("root"))
|
||||
slot := primitives.Slot(1234)
|
||||
k := cacheKey{root: root, slot: slot}
|
||||
entry := c.ensure(k)
|
||||
require.Equal(t, 1, len(c.entries))
|
||||
require.Equal(t, c.entries[k], entry)
|
||||
|
||||
c.delete(k)
|
||||
require.Equal(t, 0, len(c.entries))
|
||||
var nilEntry *cacheEntry
|
||||
require.Equal(t, nilEntry, c.entries[k])
|
||||
}
|
||||
|
||||
func TestNewEntry(t *testing.T) {
|
||||
entry := &cacheEntry{}
|
||||
require.Equal(t, false, entry.dbidxInitialized())
|
||||
entry.ensureDbidx()
|
||||
require.Equal(t, true, entry.dbidxInitialized())
|
||||
}
|
||||
|
||||
func TestDbidxBounds(t *testing.T) {
|
||||
scs := generateMinimalBlobSidecars(2)
|
||||
entry := &cacheEntry{}
|
||||
entry.ensureDbidx(scs...)
|
||||
//require.Equal(t, 2, len(entry.dbidx()))
|
||||
for i := range scs {
|
||||
require.Equal(t, bytesutil.ToBytes48(scs[i].KzgCommitment), *entry.dbidx()[i])
|
||||
}
|
||||
|
||||
var nilPtr *[48]byte
|
||||
// test that duplicate sidecars are ignored
|
||||
orig := entry.dbidx()
|
||||
copy(scs[0].KzgCommitment[0:4], []byte("derp"))
|
||||
edited := bytesutil.ToBytes48(scs[0].KzgCommitment)
|
||||
require.Equal(t, false, *entry.dbidx()[0] == edited)
|
||||
entry.ensureDbidx(scs[0])
|
||||
for i := 2; i < fieldparams.MaxBlobsPerBlock; i++ {
|
||||
require.Equal(t, entry.dbidx()[i], nilPtr)
|
||||
}
|
||||
require.Equal(t, entry.dbidx(), orig)
|
||||
|
||||
// test that excess sidecars are discarded
|
||||
oob := generateMinimalBlobSidecars(fieldparams.MaxBlobsPerBlock + 1)
|
||||
entry = &cacheEntry{}
|
||||
entry.ensureDbidx(oob...)
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(entry.dbidx()))
|
||||
}
|
||||
|
||||
func TestDbidxMissing(t *testing.T) {
|
||||
scs := generateMinimalBlobSidecars(6)
|
||||
missing := []uint64{0, 1, 2, 3, 4, 5}
|
||||
blockCommits := commitsForSidecars(scs)
|
||||
cases := []struct {
|
||||
name string
|
||||
nMissing int
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "all missing",
|
||||
nMissing: len(scs),
|
||||
},
|
||||
{
|
||||
name: "none missing",
|
||||
nMissing: 0,
|
||||
},
|
||||
{
|
||||
name: "2 missing",
|
||||
nMissing: 2,
|
||||
},
|
||||
{
|
||||
name: "3 missing",
|
||||
nMissing: 3,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
l := len(scs)
|
||||
entry := &cacheEntry{}
|
||||
d := entry.ensureDbidx(scs[0 : l-c.nMissing]...)
|
||||
m, err := d.missing(blockCommits)
|
||||
if c.err == nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepEqual(t, m, missing[l-c.nMissing:])
|
||||
require.Equal(t, c.nMissing, len(m))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func commitsForSidecars(scs []*ethpb.BlobSidecar) [][]byte {
|
||||
m := make([][]byte, len(scs))
|
||||
for i := range scs {
|
||||
m[i] = scs[i].KzgCommitment
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func generateMinimalBlobSidecars(n int) []*ethpb.BlobSidecar {
|
||||
scs := make([]*ethpb.BlobSidecar, n)
|
||||
for i := 0; i < n; i++ {
|
||||
scs[i] = ðpb.BlobSidecar{
|
||||
Index: uint64(i),
|
||||
KzgCommitment: bytesutil.PadTo([]byte{byte(i)}, 48),
|
||||
}
|
||||
}
|
||||
return scs
|
||||
}
|
||||
beacon-chain/das/error.go (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
errDAIncomplete = errors.New("some BlobSidecars are not available at this time")
|
||||
errDAEquivocated = errors.New("cache contains BlobSidecars that do not match block commitments")
|
||||
errMixedRoots = errors.New("BlobSidecars must all be for the same block")
|
||||
)
|
||||
|
||||
// The following errors are exported so that gossip verification can use errors.Is to determine the correct pubsub.ValidationResult.
|
||||
var (
|
||||
// ErrInvalidInclusionProof is returned when the inclusion proof check on the BlobSidecar fails.
|
||||
ErrInvalidInclusionProof = errors.New("BlobSidecar inclusion proof is invalid")
|
||||
// ErrInvalidBlockSignature is returned when the BlobSidecar.SignedBeaconBlockHeader signature cannot be verified against the block root.
|
||||
ErrInvalidBlockSignature = errors.New("SignedBeaconBlockHeader signature could not be verified")
|
||||
// ErrInvalidCommitment is returned when the kzg_commitment cannot be verified against the kzg_proof and blob.
|
||||
ErrInvalidCommitment = errors.New("BlobSidecar.kzg_commitment verification failed")
|
||||
)
|
||||
|
||||
func NewMissingIndicesError(missing []uint64) MissingIndicesError {
|
||||
return MissingIndicesError{indices: missing}
|
||||
}
|
||||
|
||||
type MissingIndicesError struct {
|
||||
indices []uint64
|
||||
}
|
||||
|
||||
var _ error = MissingIndicesError{}
|
||||
|
||||
func (m MissingIndicesError) Error() string {
|
||||
is := make([]string, 0, len(m.indices))
|
||||
for i := range m.indices {
|
||||
is = append(is, fmt.Sprintf("%d", m.indices[i]))
|
||||
}
|
||||
return fmt.Sprintf("%s at indices %s", errDAIncomplete.Error(), strings.Join(is, ","))
|
||||
}
|
||||
|
||||
func (m MissingIndicesError) Missing() []uint64 {
|
||||
return m.indices
|
||||
}
|
||||
|
||||
func (m MissingIndicesError) Unwrap() error {
|
||||
return errDAIncomplete
|
||||
}
|
||||
|
||||
func NewCommitmentMismatchError(mismatch []uint64) CommitmentMismatchError {
|
||||
return CommitmentMismatchError{mismatch: mismatch}
|
||||
}
|
||||
|
||||
type CommitmentMismatchError struct {
|
||||
mismatch []uint64
|
||||
}
|
||||
|
||||
var _ error = CommitmentMismatchError{}
|
||||
|
||||
func (m CommitmentMismatchError) Error() string {
|
||||
is := make([]string, 0, len(m.mismatch))
|
||||
for i := range m.mismatch {
|
||||
is = append(is, fmt.Sprintf("%d", m.mismatch[i]))
|
||||
}
|
||||
return fmt.Sprintf("%s at indices %s", errDAEquivocated.Error(), strings.Join(is, ","))
|
||||
}
|
||||
|
||||
func (m CommitmentMismatchError) Mismatch() []uint64 {
|
||||
return m.mismatch
|
||||
}
|
||||
|
||||
func (m CommitmentMismatchError) Unwrap() error {
|
||||
return errDAEquivocated
|
||||
}
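
Because both error types implement Unwrap, callers can branch on the sentinel with errors.Is and still recover the indices with errors.As. A generic sketch of that consumption pattern; the local missingError only mirrors the shape of MissingIndicesError and is not the real type:

package main

import (
	"errors"
	"fmt"
)

var errIncomplete = errors.New("some sidecars are not available")

// missingError mimics the shape of MissingIndicesError: a sentinel via Unwrap
// plus a structured payload of indices.
type missingError struct{ indices []uint64 }

func (m missingError) Error() string     { return fmt.Sprintf("%v: %v", errIncomplete, m.indices) }
func (m missingError) Unwrap() error     { return errIncomplete }
func (m missingError) Missing() []uint64 { return m.indices }

func handle(err error) {
	if errors.Is(err, errIncomplete) { // sentinel check, works through Unwrap
		var me missingError
		if errors.As(err, &me) { // recover the indices to retry
			fmt.Println("retry indices:", me.Missing())
		}
	}
}

func main() {
	handle(missingError{indices: []uint64{0, 2}})
}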
|
||||
beacon-chain/das/iface.go (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// BlobsDB specifies the persistence store methods needed by the AvailabilityStore.
|
||||
type BlobsDB interface {
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
}
|
||||
|
||||
// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously
|
||||
// verified and saved sidecars.
|
||||
type AvailabilityStore interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
Persist(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
|
||||
}
|
||||
beacon-chain/das/mock.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type MockAvailabilityStore struct {
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
PersistBlobsCallback func(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
if m.VerifyAvailabilityCallback != nil {
|
||||
return m.VerifyAvailabilityCallback(ctx, current, b)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockAvailabilityStore) Persist(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
if m.PersistBlobsCallback != nil {
|
||||
return m.PersistBlobsCallback(ctx, current, sc...)
|
||||
}
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
type mockBlobsDB struct {
|
||||
BlobSidecarsByRootCallback func(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
SaveBlobSidecarCallback func(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
}
|
||||
|
||||
var _ BlobsDB = &mockBlobsDB{}
|
||||
|
||||
func (b *mockBlobsDB) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
if b.BlobSidecarsByRootCallback != nil {
|
||||
return b.BlobSidecarsByRootCallback(ctx, root, indices...)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *mockBlobsDB) SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error {
|
||||
if b.SaveBlobSidecarCallback != nil {
|
||||
return b.SaveBlobSidecarCallback(ctx, sidecars)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -13,9 +13,9 @@ func NewDB(ctx context.Context, dirPath string) (Database, error) {
|
||||
return kv.NewKVStore(ctx, dirPath)
|
||||
}
|
||||
|
||||
// NewDBFilename uses the KVStoreDatafilePath so that if this layer of
|
||||
// NewFileName uses the KVStoreDatafilePath so that if this layer of
|
||||
// indirection between db.NewDB->kv.NewKVStore ever changes, it will be easy to remember
|
||||
// to also change this filename indirection at the same time.
|
||||
func NewDBFilename(dirPath string) string {
|
||||
return kv.KVStoreDatafilePath(dirPath)
|
||||
func NewFileName(dirPath string) string {
|
||||
return kv.StoreDatafilePath(dirPath)
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ type NoHeadAccessDatabase interface {
|
||||
|
||||
// Blob operations.
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
}
|
||||
@@ -170,7 +170,7 @@ type SlasherDatabase interface {
|
||||
// Database interface with full access.
|
||||
type Database interface {
|
||||
io.Closer
|
||||
backup.BackupExporter
|
||||
backup.Exporter
|
||||
HeadAccessDatabase
|
||||
|
||||
DatabasePath() string
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
"error.go",
|
||||
"execution_chain.go",
|
||||
"finalized_block_roots.go",
|
||||
"flags.go",
|
||||
"genesis.go",
|
||||
"key.go",
|
||||
"kv.go",
|
||||
@@ -38,6 +39,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -65,6 +67,7 @@ go_library(
|
||||
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
|
||||
"@com_github_schollz_progressbar_v3//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
@@ -83,6 +86,7 @@ go_test(
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
"finalized_block_roots_test.go",
|
||||
"flags_test.go",
|
||||
"genesis_test.go",
|
||||
"init_test.go",
|
||||
"kv_test.go",
|
||||
@@ -103,6 +107,7 @@ go_test(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -119,6 +124,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -12,11 +12,20 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
errBlobSlotMismatch = errors.New("sidecar slot mismatch")
|
||||
errBlobParentMismatch = errors.New("sidecar parent root mismatch")
|
||||
errBlobRootMismatch = errors.New("sidecar root mismatch")
|
||||
errBlobProposerMismatch = errors.New("sidecar proposer index mismatch")
|
||||
errBlobSidecarLimit = errors.New("sidecar exceeds maximum number of blobs")
|
||||
errEmptySidecar = errors.New("nil or empty blob sidecars")
|
||||
errNewerBlobExists = errors.New("Will not overwrite newer blobs in db")
|
||||
)
|
||||
|
||||
// A blob rotating key is represented as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
type blobRotatingKey []byte
|
||||
|
||||
@@ -39,100 +48,126 @@ func (rk blobRotatingKey) BlockRoot() []byte {
|
||||
|
||||
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_EPOCHS_TO_PERSIST_BLOBS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
// Trying to replace a newer blob with an older one is an error.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
	if len(scs) == 0 {
		return errEmptySidecar
	}
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
	defer span.End()

	sortSideCars(scs)
	if err := s.verifySideCars(scs); err != nil {
		return err
	}

	first := scs[0]
	newKey := blobSidecarKey(first)
	prefix := newKey.BufferPrefix()
	var prune []blobRotatingKey
	return s.db.Update(func(tx *bolt.Tx) error {
		encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
		var existing []byte
		sc := &ethpb.BlobSidecars{}
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
			key := blobRotatingKey(k)
			ks := key.Slot()
			if ks < first.Slot {
				// Mark older blobs at the same position of the ring buffer for deletion.
				prune = append(prune, key)
				continue
			}
			if ks > first.Slot {
				// We shouldn't be overwriting newer blobs with older blobs. Something is wrong.
				return errNewerBlobExists
			}
			// The slot isn't older or newer, so it must be equal.
			// If the roots match, then we want to merge the new sidecars with the existing data.
			if bytes.Equal(first.BlockRoot, key.BlockRoot()) {
				existing = v
				if err := decode(ctx, v, sc); err != nil {
					return err
				}
			}
			// If the slot is equal but the roots don't match, leave the existing key alone and allow the sidecar
			// to be written to the new key with the same prefix. In this case sc will be empty, so it will just
			// contain the incoming sidecars when we write it.
		}
		sc.Sidecars = append(sc.Sidecars, scs...)
		sortSidecars(sc.Sidecars)
		var err error
		sc.Sidecars, err = validUniqueSidecars(sc.Sidecars)
		if err != nil {
			return err
		}

		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		newKey := blobSidecarKey(scs[0])
		rotatingBufferPrefix := newKey.BufferPrefix()
		var replacingKey blobRotatingKey
		for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
			if len(k) != 0 {
				replacingKey = k
				break
		encoded, err := encode(ctx, sc)
		if err != nil {
			return err
		}
		// don't write if the merged result is the same as before
		if len(existing) == len(encoded) && bytes.Equal(existing, encoded) {
			return nil
		}
		// Only prune if we're actually going through with the update.
		for _, k := range prune {
			if err := bkt.Delete(k); err != nil {
				// note: attempting to delete a key that does not exist should not return an error.
				log.WithError(err).Warnf("Could not delete blob key %#x.", k)
			}
		}
		// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
		// store the blob by key and exit early.
		if len(replacingKey) != 0 {
			oldSlot := replacingKey.Slot()
			oldEpoch := slots.ToEpoch(oldSlot)
			// The blob we are replacing is too old, so we delete it.
			if slots.ToEpoch(scs[0].Slot) >= oldEpoch.Add(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)) {
				if err := bkt.Delete(replacingKey); err != nil {
					log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
				}
			} else {
				// Otherwise, we need to merge the new blob with the old blob.
				enc := bkt.Get(replacingKey)
				sc := &ethpb.BlobSidecars{}
				if err := decode(ctx, enc, sc); err != nil {
					return err
				}
				sc.Sidecars = append(sc.Sidecars, scs...)
				sortSideCars(sc.Sidecars)
				encodedBlobSidecar, err = encode(ctx, &ethpb.BlobSidecars{Sidecars: sc.Sidecars})
				if err != nil {
					return err
				}
			}
		}
		return bkt.Put(newKey, encodedBlobSidecar)
		return bkt.Put(newKey, encoded)
	})
}
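// A minimal, self-contained sketch of the rotating-key layout described in the
// SaveBlobSidecar comment above: bytes(slot % maxSlots) ++ bytes(slot) ++ block_root.
// The mainnet numbers (32 slots per epoch, 4096 retention epochs, i.e. a 131072-slot
// ring) and the 8-byte big-endian slot encoding are assumptions for illustration; the
// store itself builds keys through blobSidecarKey and the bytesutil helpers.
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	slotsPerEpoch     = 32                              // assumed mainnet value
	retentionEpochs   = 4096                            // assumed MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
	maxSlotsToPersist = slotsPerEpoch * retentionEpochs // 131072-slot rotating buffer
)

// rotatingKey builds bytes(slot % maxSlots) ++ bytes(slot) ++ blockRoot.
func rotatingKey(slot uint64, blockRoot [32]byte) []byte {
	key := make([]byte, 0, 8+8+32)
	key = binary.BigEndian.AppendUint64(key, slot%maxSlotsToPersist) // ring-buffer position, used as the cursor prefix
	key = binary.BigEndian.AppendUint64(key, slot)                   // the real slot, so stale entries can be detected
	key = append(key, blockRoot[:]...)                               // distinguishes equivocating blocks at the same slot
	return key
}

func main() {
	var root [32]byte
	root[0] = 0xaa
	k := rotatingKey(131080, root) // 131080 % 131072 == 8, so this sidecar lands at ring position 8
	fmt.Printf("prefix=%x slot=%x root=%x...\n", k[:8], k[8:16], k[16:20])
}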

// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and no more than MAX_BLOB_EPOCHS.
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
	if len(scs) == 0 {
		return errors.New("nil or empty blob sidecars")
	}
	if uint64(len(scs)) > fieldparams.MaxBlobsPerBlock {
		return fmt.Errorf("too many sidecars: %d > %d", len(scs), fieldparams.MaxBlobsPerBlock)
		return nil, errEmptySidecar
	}

	sl := scs[0].Slot
	pr := scs[0].BlockParentRoot
	r := scs[0].BlockRoot
	p := scs[0].ProposerIndex

	for _, sc := range scs {
		if sc.Slot != sl {
			return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
		}
		if !bytes.Equal(sc.BlockParentRoot, pr) {
			return fmt.Errorf("sidecar parent root mismatch: %x != %x", sc.BlockParentRoot, pr)
		}
		if !bytes.Equal(sc.BlockRoot, r) {
			return fmt.Errorf("sidecar root mismatch: %x != %x", sc.BlockRoot, r)
		}
		if sc.ProposerIndex != p {
			return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
		}
	// If there's only 1 sidecar, we've got nothing to compare.
	if len(scs) == 1 {
		return scs, nil
	}
	return nil

	prev := scs[0]
	didx := 1
	for i := 1; i < len(scs); i++ {
		sc := scs[i]
		if sc.Slot != prev.Slot {
			return nil, errors.Wrapf(errBlobSlotMismatch, "%d != %d", sc.Slot, prev.Slot)
		}
		if !bytes.Equal(sc.BlockParentRoot, prev.BlockParentRoot) {
			return nil, errors.Wrapf(errBlobParentMismatch, "%x != %x", sc.BlockParentRoot, prev.BlockParentRoot)
		}
		if !bytes.Equal(sc.BlockRoot, prev.BlockRoot) {
			return nil, errors.Wrapf(errBlobRootMismatch, "%x != %x", sc.BlockRoot, prev.BlockRoot)
		}
		if sc.ProposerIndex != prev.ProposerIndex {
			return nil, errors.Wrapf(errBlobProposerMismatch, "%d != %d", sc.ProposerIndex, prev.ProposerIndex)
		}
		// skip duplicate
		if sc.Index == prev.Index {
			continue
		}
		if didx != i {
			scs[didx] = scs[i]
		}
		prev = scs[i]
		didx += 1
	}

	if didx > fieldparams.MaxBlobsPerBlock {
		return nil, errors.Wrapf(errBlobSidecarLimit, "%d > %d", didx, fieldparams.MaxBlobsPerBlock)
	}
	return scs[0:didx], nil
}
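// A stand-alone sketch of the compaction walk validUniqueSidecars performs: because the
// sidecars are sorted by index first, duplicates are adjacent and a single forward pass
// can drop them in place. The []uint64 input and the function name below are
// illustrative only, not part of the store.
package main

import (
	"fmt"
	"sort"
)

// compactSorted keeps the first occurrence of each value in a sorted slice, in place.
func compactSorted(indices []uint64) []uint64 {
	if len(indices) < 2 {
		return indices
	}
	didx := 1
	for i := 1; i < len(indices); i++ {
		if indices[i] == indices[i-1] {
			continue // skip duplicate, analogous to the sc.Index == prev.Index check above
		}
		indices[didx] = indices[i]
		didx++
	}
	return indices[:didx]
}

func main() {
	idx := []uint64{3, 1, 2, 2, 3}
	sort.Slice(idx, func(i, j int) bool { return idx[i] < idx[j] })
	fmt.Println(compactSorted(idx)) // prints [1 2 3]
}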

// sortSideCars sorts the sidecars by their index.
func sortSideCars(scs []*ethpb.BlobSidecar) {
// sortSidecars sorts the sidecars by their index.
func sortSidecars(scs []*ethpb.BlobSidecar) {
	sort.Slice(scs, func(i, j int) bool {
		return scs[i].Index < scs[j].Index
	})
@@ -172,6 +207,37 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
	return filterForIndices(sc, indices...)
}

func (s *Store) BlobIndicesAvailable(ctx context.Context, root [32]byte) ([]uint64, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobIndicesAvailable")
	defer span.End()

	var enc []byte
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if bytes.HasSuffix(k, root[:]) {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	scs := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, scs); err != nil {
		return nil, err
	}
	idxs := make([]uint64, len(scs.Sidecars))
	for i := range scs.Sidecars {
		idxs[i] = scs.Sidecars[i].Index
	}
	return idxs, nil
}
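// A hedged usage sketch for BlobIndicesAvailable, written in the style of the tests in
// this diff: a caller can check which sidecar indices are already stored for a block
// root before fetching the rest. The helpers used here (setupDB, generateBlobSidecars)
// are the test-file helpers; the scenario itself is an assumption for illustration.
func sketchBlobIndicesAvailable(t *testing.T) {
	ctx := context.Background()
	db := setupDB(t)
	scs := generateBlobSidecars(t, 2) // store only indices 0 and 1
	require.NoError(t, db.SaveBlobSidecar(ctx, scs))

	have, err := db.BlobIndicesAvailable(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
	require.NoError(t, err) // a root with no blobs at all returns ErrNotFound instead

	stored := make(map[uint64]bool, len(have))
	for _, idx := range have {
		stored[idx] = true
	}
	// Anything not in `stored` (indices 2..5 in this example) would still need to be requested.
	_ = stored
}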

func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	if len(indices) == 0 {
		return sc.Sidecars, nil
@@ -191,7 +257,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
}

// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
@@ -226,8 +292,8 @@ func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices
	return filterForIndices(sc, indices...)
}

// DeleteBlobSidecar returns true if the blobs are in the db.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
// DeleteBlobSidecars deletes all blob sidecars stored for the given beacon block root.
func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error {
	_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
@@ -255,7 +321,6 @@ func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {

func slotKey(slot types.Slot) []byte {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
	return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
}
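// A quick worked example of the arithmetic behind slotKey and the "131072 entries" bound
// mentioned in the BlobSidecarsBySlot comment, assuming mainnet parameters
// (MinEpochsForBlobsSidecarsRequest = 4096, SlotsPerEpoch = 32). The constant names below
// are illustrative, not the production config accessors.
package main

import "fmt"

func main() {
	const slotsPerEpoch = 32              // assumed mainnet value
	const minEpochsForBlobsRequest = 4096 // assumed mainnet value
	maxSlots := uint64(minEpochsForBlobsRequest * slotsPerEpoch)

	fmt.Println(maxSlots)                     // 131072 slots kept before the buffer rotates
	fmt.Println(uint64(6_500_000) % maxSlots) // the ring-buffer position an arbitrary slot maps to
}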
@@ -265,14 +330,14 @@ func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
		b := tx.Bucket(chainMetadataBucket)
		v := b.Get(blobRetentionEpochsKey)
		if v == nil {
			if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
			if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(maxEpochsToPersistBlobs))); err != nil {
				return err
			}
			return nil
		}
		e := bytesutil.BytesToUint64BigEndian(v)
		if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
			return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
		if e != uint64(maxEpochsToPersistBlobs) {
			return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, maxEpochsToPersistBlobs)
		}
		return nil
	}); err != nil {

@@ -3,10 +3,13 @@ package kv
import (
	"context"
	"crypto/rand"
	"flag"
	"fmt"
	"strconv"
	"testing"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -15,6 +18,7 @@ import (
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/assertions"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/urfave/cli/v2"
	bolt "go.etcd.io/bbolt"
)

@@ -87,7 +91,7 @@ func TestStore_BlobSidecars(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))

		// we'll request indices 0 and 3, so make a slice with those indices for comparison
		expect := make([]*ethpb.BlobSidecar, 2)
@@ -104,7 +108,7 @@ func TestStore_BlobSidecars(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))

		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
		require.ErrorIs(t, err, ErrNotFound)
@@ -123,7 +127,7 @@ func TestStore_BlobSidecars(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
@@ -143,7 +147,7 @@ func TestStore_BlobSidecars(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))

		// we'll request indices 0 and 3, so make a slice with those indices for comparison
		expect := make([]*ethpb.BlobSidecar, 2)
@@ -161,7 +165,7 @@ func TestStore_BlobSidecars(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))

		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
		require.ErrorIs(t, err, ErrNotFound)
@@ -171,11 +175,11 @@ func TestStore_BlobSidecars(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
		require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
		require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
		require.NoError(t, db.DeleteBlobSidecars(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
@@ -294,6 +298,24 @@ func TestStore_BlobSidecars(t *testing.T) {
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save equivocating blobs", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
		eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)

		for i, sc := range scs {
			require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
			require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{eScs[i]}))
		}

		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))

		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(eScs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(eScs, got))
	})
}

func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
@@ -314,7 +336,6 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
	kzgProof := make([]byte, 48)
	_, err = rand.Read(kzgProof)
	require.NoError(t, err)

	return &ethpb.BlobSidecar{
		BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
		Index:     index,
@@ -327,26 +348,56 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
	}
}

func TestStore_verifySideCars(t *testing.T) {
	s := setupDB(t)
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
	blobSidecars := make([]*ethpb.BlobSidecar, n)
	for i := uint64(0); i < n; i++ {
		blobSidecars[i] = generateEquivocatingBlobSidecar(t, i)
	}
	return blobSidecars
}

func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
	blob := make([]byte, 131072)
	_, err := rand.Read(blob)
	require.NoError(t, err)
	kzgCommitment := make([]byte, 48)
	_, err = rand.Read(kzgCommitment)
	require.NoError(t, err)
	kzgProof := make([]byte, 48)
	_, err = rand.Read(kzgProof)
	require.NoError(t, err)

	return &ethpb.BlobSidecar{
		BlockRoot:       bytesutil.PadTo([]byte{'c'}, 32),
		Index:           index,
		Slot:            100,
		BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
		ProposerIndex:   102,
		Blob:            blob,
		KzgCommitment:   kzgCommitment,
		KzgProof:        kzgProof,
	}
}

func Test_validUniqueSidecars_validation(t *testing.T) {
	tests := []struct {
		name  string
		scs   []*ethpb.BlobSidecar
		error string
		name string
		scs  []*ethpb.BlobSidecar
		err  error
	}{
		{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
		{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), error: "too many sidecars: 7 > 6"},
		{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
		{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
		{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
		{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
		{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
		{name: "empty", scs: []*ethpb.BlobSidecar{}, err: errEmptySidecar},
		{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit},
		{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
		{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
		{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
		{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
		{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := s.verifySideCars(tt.scs)
			if tt.error != "" {
				require.Equal(t, tt.error, err.Error())
			_, err := validUniqueSidecars(tt.scs)
			if tt.err != nil {
				require.ErrorIs(t, err, tt.err)
			} else {
				require.NoError(t, err)
			}
@@ -354,6 +405,62 @@ func TestStore_verifySideCars(t *testing.T) {
	}
}

func Test_validUniqueSidecars_dedup(t *testing.T) {
	cases := []struct {
		name     string
		scs      []*ethpb.BlobSidecar
		expected []*ethpb.BlobSidecar
		err      error
	}{
		{
			name:     "duplicate sidecar",
			scs:      []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}},
			expected: []*ethpb.BlobSidecar{{Index: 1}},
		},
		{
			name:     "single sidecar",
			scs:      []*ethpb.BlobSidecar{{Index: 1}},
			expected: []*ethpb.BlobSidecar{{Index: 1}},
		},
		{
			name:     "multiple duplicates",
			scs:      []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
			expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
		},
		{
			name:     "ok number after de-dupe, > 6 before",
			scs:      []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
			expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
		},
		{
			name:     "max unique, no dupes",
			scs:      []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
			expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
		},
		{
			name: "too many unique",
			scs:  []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
			err:  errBlobSidecarLimit,
		},
		{
			name: "too many unique with dupes",
			scs:  []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
			err:  errBlobSidecarLimit,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			u, err := validUniqueSidecars(c.scs)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, len(c.expected), len(u))
		})
	}
}

func TestStore_sortSidecars(t *testing.T) {
	scs := []*ethpb.BlobSidecar{
		{Index: 6},
@@ -364,7 +471,7 @@ func TestStore_sortSidecars(t *testing.T) {
		{Index: 5},
		{},
	}
	sortSideCars(scs)
	sortSidecars(scs)
	for i := 0; i < len(scs)-1; i++ {
		require.Equal(t, uint64(i), scs[i].Index)
	}
@@ -412,9 +519,12 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
	require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
	require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check

	nConfig := params.BeaconNetworkConfig()
	nConfig.MinEpochsForBlobsSidecarsRequest = 42069
	params.OverrideBeaconNetworkConfig(nConfig)
	params.SetupTestConfigCleanup(t)
	set := flag.NewFlagSet("test", 0)
	set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
	require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(42069, 10)))
	cliCtx := cli.NewContext(&cli.App{}, set, nil)
	require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
	require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
}

@@ -111,10 +111,10 @@ var blockTests = []struct {
		name: "deneb blind",
		newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
			b := util.NewBlindedBeaconBlockDeneb()
			b.Block.Slot = slot
			b.Message.Slot = slot
			if root != nil {
				b.Block.ParentRoot = root
				b.Block.Body.BlobKzgCommitments = [][]byte{
				b.Message.ParentRoot = root
				b.Message.Body.BlobKzgCommitments = [][]byte{
					bytesutil.PadTo([]byte{0x05}, 48),
					bytesutil.PadTo([]byte{0x06}, 48),
					bytesutil.PadTo([]byte{0x07}, 48),

beacon-chain/db/kv/flags.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package kv

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/urfave/cli/v2"
)

var maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest

// ConfigureBlobRetentionEpoch sets the epoch for blob retention based on the command-line context. It sets the local config `maxEpochsToPersistBlobs`.
// If the flag is not set, the spec default `MinEpochsForBlobsSidecarsRequest` is used.
// It returns an error if the input epoch is smaller than the spec default value.
func ConfigureBlobRetentionEpoch(cliCtx *cli.Context) error {
	// Check if the blob retention epoch flag is set.
	if cliCtx.IsSet(flags.BlobRetentionEpoch.Name) {
		// Retrieve and cast the epoch value.
		epochValue := cliCtx.Uint64(flags.BlobRetentionEpoch.Name)
		e := primitives.Epoch(epochValue)

		// Validate the epoch value against the spec default.
		if e < params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest {
			return fmt.Errorf("%s smaller than spec default, %d < %d", flags.BlobRetentionEpoch.Name, e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
		}

		maxEpochsToPersistBlobs = e
	}

	return nil
}
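// A minimal sketch of driving ConfigureBlobRetentionEpoch from a CLI context, following
// the same pattern the updated Test_checkEpochsForBlobSidecarsRequestBucket uses. The
// retention value 8192 is arbitrary; it only has to be at least the spec default
// (4096 epochs on mainnet, per MinEpochsForBlobsSidecarsRequest).
package kv

import (
	"flag"
	"strconv"

	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	"github.com/urfave/cli/v2"
)

func exampleConfigureBlobRetention() error {
	set := flag.NewFlagSet("example", 0)
	set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
	if err := set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(8192, 10)); err != nil {
		return err
	}
	cliCtx := cli.NewContext(&cli.App{}, set, nil)
	// On success, the package-level maxEpochsToPersistBlobs becomes 8192, and a freshly
	// created database records that retention period via checkEpochsForBlobSidecarsRequestBucket.
	return ConfigureBlobRetentionEpoch(cliCtx)
}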