mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 05:47:59 -05:00
Compare commits
159 Commits
initialize
...
tracked-va
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
45d2219cf5 | ||
|
|
660b4d7678 | ||
|
|
b3f0194a79 | ||
|
|
be60504512 | ||
|
|
1857496159 | ||
|
|
ccf61e1700 | ||
|
|
4edbd2f9ef | ||
|
|
5179af1438 | ||
|
|
c0f9689e30 | ||
|
|
ff8240a04f | ||
|
|
847498c648 | ||
|
|
2633684339 | ||
|
|
ab3f1963e2 | ||
|
|
b87d02eeb3 | ||
|
|
bcb4155523 | ||
|
|
77f10b9e0e | ||
|
|
928b707ef1 | ||
|
|
91c15247e5 | ||
|
|
5ef5b65ffe | ||
|
|
9ae97786c5 | ||
|
|
66d1bb54f6 | ||
|
|
4d98049054 | ||
|
|
d5ff25b59d | ||
|
|
a265cf08fa | ||
|
|
f2ade3caff | ||
|
|
e6ffc0701e | ||
|
|
61c296e075 | ||
|
|
f264680739 | ||
|
|
8fe024f6a1 | ||
|
|
6b7dd833a3 | ||
|
|
060527032b | ||
|
|
421f7a75e0 | ||
|
|
a29ecb6bbe | ||
|
|
54656e172f | ||
|
|
b6965f0174 | ||
|
|
97c8adb003 | ||
|
|
2fe8614115 | ||
|
|
09accc7132 | ||
|
|
53f1f11c6d | ||
|
|
48fe9d9c4d | ||
|
|
4386c244e1 | ||
|
|
7ac522d8ff | ||
|
|
b98e9c510e | ||
|
|
d4017743d3 | ||
|
|
c85a23251c | ||
|
|
52cf3a155d | ||
|
|
83ed320826 | ||
|
|
0a6457fc3a | ||
|
|
f3458a3f4a | ||
|
|
8da7bdeaa6 | ||
|
|
74b07cf48c | ||
|
|
616cdc1e8b | ||
|
|
361712e886 | ||
|
|
9ec8c6c4b5 | ||
|
|
073cf19b69 | ||
|
|
6ac8090599 | ||
|
|
4aa54107e4 | ||
|
|
ffc443b5f2 | ||
|
|
d6c5692dc0 | ||
|
|
1086bdf2b3 | ||
|
|
2afa63b442 | ||
|
|
5a5193c59d | ||
|
|
c8d3ed02cb | ||
|
|
30fcf5366a | ||
|
|
f776b968ad | ||
|
|
de094b0078 | ||
|
|
0a4ed8279b | ||
|
|
f307a369a5 | ||
|
|
dc91c963b9 | ||
|
|
7238848d81 | ||
|
|
80cafaa6df | ||
|
|
8a0545c3d7 | ||
|
|
9c61117b71 | ||
|
|
6c22edeecc | ||
|
|
57cc4950c0 | ||
|
|
2c981d5564 | ||
|
|
492c8af83f | ||
|
|
e40d2cbd2c | ||
|
|
3fa6d3bd9d | ||
|
|
56f0eb1437 | ||
|
|
7fc5c714a1 | ||
|
|
cfbfccb203 | ||
|
|
884b663455 | ||
|
|
0f1d16c599 | ||
|
|
c11e3392d4 | ||
|
|
f498463843 | ||
|
|
cf4ffc97e2 | ||
|
|
3824e8a463 | ||
|
|
21ca4e008f | ||
|
|
6af44a1466 | ||
|
|
2e29164582 | ||
|
|
6d499bc9fc | ||
|
|
7786cb5684 | ||
|
|
003b70c34b | ||
|
|
71edf96c7d | ||
|
|
ddafedc268 | ||
|
|
7e5738bfcd | ||
|
|
315c05b351 | ||
|
|
3662cf6009 | ||
|
|
98d8b50b0e | ||
|
|
1a1cc25bd1 | ||
|
|
bc9c7193a9 | ||
|
|
7ac3c01b5b | ||
|
|
ed6f69e868 | ||
|
|
222b360c66 | ||
|
|
170a864239 | ||
|
|
b5cfd0d35d | ||
|
|
28181710b0 | ||
|
|
2f756b7ec4 | ||
|
|
df4ca54a76 | ||
|
|
875e3e5e7d | ||
|
|
a5317f8117 | ||
|
|
38b92c0171 | ||
|
|
a03b34af77 | ||
|
|
4c14bd8be2 | ||
|
|
62b8e63a0a | ||
|
|
eec3b0b7fe | ||
|
|
2bffb83a00 | ||
|
|
45fd3eb1bf | ||
|
|
963a1b4cb7 | ||
|
|
77c845043d | ||
|
|
342bb0fcef | ||
|
|
93e6bd7929 | ||
|
|
3015eea4e3 | ||
|
|
2f42f7e313 | ||
|
|
a7c86a6d1b | ||
|
|
2399451869 | ||
|
|
19d9a1915d | ||
|
|
261921ae4c | ||
|
|
6ad8a104dd | ||
|
|
50e53265a1 | ||
|
|
3392fdb21d | ||
|
|
dd3c9652c3 | ||
|
|
022a53f8f2 | ||
|
|
2fa3547644 | ||
|
|
7c213ce161 | ||
|
|
ed3d7d49ec | ||
|
|
068139a78a | ||
|
|
8dd7361b6a | ||
|
|
41ea1d230a | ||
|
|
9e25026519 | ||
|
|
9e9559df60 | ||
|
|
7a5a6c7e54 | ||
|
|
be317c439d | ||
|
|
f43383a3fb | ||
|
|
22f6f787e1 | ||
|
|
44b3986025 | ||
|
|
6cb845660a | ||
|
|
de2c866707 | ||
|
|
74ddb84e0a | ||
|
|
0c6a068fd5 | ||
|
|
fad92472d8 | ||
|
|
2a44e8e6ec | ||
|
|
e3d27f29c7 | ||
|
|
e011f05403 | ||
|
|
b8cd77945d | ||
|
|
9a7f521f8a | ||
|
|
102f94f914 | ||
|
|
0c0a497651 |
7
.github/PULL_REQUEST_TEMPLATE.md
vendored
7
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -10,6 +10,7 @@
|
||||
in review.
|
||||
4. Note that PRs updating dependencies and new Go versions are not accepted.
|
||||
Please file an issue instead.
|
||||
5. A changelog entry is required for user facing issues.
|
||||
-->
|
||||
|
||||
**What type of PR is this?**
|
||||
@@ -28,3 +29,9 @@
|
||||
Fixes #
|
||||
|
||||
**Other notes for review**
|
||||
|
||||
**Acknowledgements**
|
||||
|
||||
- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
|
||||
- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
|
||||
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
|
||||
|
||||
33
.github/workflows/changelog.yml
vendored
Normal file
33
.github/workflows/changelog.yml
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
|
||||
jobs:
|
||||
changed_files:
|
||||
runs-on: ubuntu-latest
|
||||
name: Check CHANGELOG.md
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: changelog modified
|
||||
id: changelog-modified
|
||||
uses: tj-actions/changed-files@v45
|
||||
with:
|
||||
files: CHANGELOG.md
|
||||
|
||||
- name: List all changed files
|
||||
env:
|
||||
ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
|
||||
run: |
|
||||
if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
|
||||
then
|
||||
echo "CHANGELOG.md was modified.";
|
||||
exit 0;
|
||||
else
|
||||
echo "CHANGELOG.md was not modified.";
|
||||
echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
|
||||
echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
|
||||
exit 1;
|
||||
fi
|
||||
22
.github/workflows/go.yml
vendored
22
.github/workflows/go.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Go mod tidy checker
|
||||
id: gomodtidy
|
||||
@@ -27,11 +27,11 @@ jobs:
|
||||
GO111MODULE: on
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go 1.22
|
||||
uses: actions/setup-go@v3
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.22.3'
|
||||
go-version: '1.22.6'
|
||||
- name: Run Gosec Security Scanner
|
||||
run: | # https://github.com/securego/gosec/issues/469
|
||||
export PATH=$PATH:$(go env GOPATH)/bin
|
||||
@@ -43,16 +43,16 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go 1.22
|
||||
uses: actions/setup-go@v3
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.22.3'
|
||||
go-version: '1.22.6'
|
||||
id: go
|
||||
|
||||
- name: Golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
uses: golangci/golangci-lint-action@v5
|
||||
with:
|
||||
version: v1.55.2
|
||||
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
|
||||
@@ -62,13 +62,13 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.22.3'
|
||||
go-version: '1.22.6'
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get dependencies
|
||||
run: |
|
||||
|
||||
@@ -6,7 +6,7 @@ run:
|
||||
- proto
|
||||
- tools/analyzers
|
||||
timeout: 10m
|
||||
go: '1.22.3'
|
||||
go: '1.22.6'
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
|
||||
@@ -26,7 +26,6 @@ approval_rules:
|
||||
only_changed_files:
|
||||
paths:
|
||||
- "*pb.go"
|
||||
- "*pb.gw.go"
|
||||
- "*.bazel"
|
||||
options:
|
||||
ignore_commits_by:
|
||||
@@ -69,7 +68,6 @@ approval_rules:
|
||||
changed_files:
|
||||
ignore:
|
||||
- "*pb.go"
|
||||
- "*pb.gw.go"
|
||||
- "*.bazel"
|
||||
options:
|
||||
ignore_commits_by:
|
||||
|
||||
@@ -55,13 +55,6 @@ alias(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
# Protobuf gRPC gateway compiler
|
||||
alias(
|
||||
name = "grpc_gateway_proto_compiler",
|
||||
actual = "@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
gometalinter(
|
||||
name = "gometalinter",
|
||||
config = "//:.gometalinter.json",
|
||||
|
||||
2934
CHANGELOG.md
Normal file
2934
CHANGELOG.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -6,6 +6,9 @@ Excited by our work and want to get involved in building out our sharding releas
|
||||
|
||||
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Please, **do not send pull requests for trivial changes**, such as typos, these will be rejected. These types of pull requests incur a cost to reviewers and do not provide much value to the project. If you are unsure, please open an issue first to discuss the change.
|
||||
|
||||
## Contribution Steps
|
||||
|
||||
**1. Set up Prysm following the instructions in README.md.**
|
||||
@@ -120,15 +123,19 @@ $ git push myrepo feature-in-progress-branch
|
||||
|
||||
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
|
||||
|
||||
**16. Create a pull request.**
|
||||
**16. Add an entry to CHANGELOG.md.**
|
||||
|
||||
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls.
|
||||
If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
|
||||
|
||||
**17. Respond to comments by Core Contributors.**
|
||||
**17. Create a pull request.**
|
||||
|
||||
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
|
||||
|
||||
**18. Respond to comments by Core Contributors.**
|
||||
|
||||
Core Contributors may ask questions and request that you make edits. If you set notifications at the top of the page to “not watching,” you will still be notified by email whenever someone comments on the page of a pull request you have created. If you are asked to modify your pull request, repeat steps 8 through 15, then leave a comment to notify the Core Contributors that the pull request is ready for further review.
|
||||
|
||||
**18. If the number of commits becomes excessive, you may be asked to squash your commits.**
|
||||
**19. If the number of commits becomes excessive, you may be asked to squash your commits.**
|
||||
|
||||
You can do this with an interactive rebase. Start by running the following command to determine the commit that is the base of your branch...
|
||||
|
||||
@@ -136,7 +143,7 @@ Core Contributors may ask questions and request that you make edits. If you set
|
||||
$ git merge-base feature-in-progress-branch prysm/master
|
||||
```
|
||||
|
||||
**19. The previous command will return a commit-hash that you should use in the following command.**
|
||||
**20. The previous command will return a commit-hash that you should use in the following command.**
|
||||
|
||||
```
|
||||
$ git rebase -i commit-hash
|
||||
@@ -160,13 +167,30 @@ squash hash add a feature
|
||||
|
||||
Save and close the file, then a commit command will appear in the terminal that squashes the smaller commits into one. Check to be sure the commit message accurately reflects your changes and then hit enter to execute it.
|
||||
|
||||
**20. Update your pull request with the following command.**
|
||||
**21. Update your pull request with the following command.**
|
||||
|
||||
```
|
||||
$ git push myrepo feature-in-progress-branch -f
|
||||
```
|
||||
|
||||
**21. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
|
||||
**22. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
|
||||
|
||||
## Maintaining CHANGELOG.md
|
||||
|
||||
This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).
|
||||
|
||||
All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:
|
||||
|
||||
- `Added` for new features.
|
||||
- `Changed` for changes in existing functionality.
|
||||
- `Deprecated` for soon-to-be removed features.
|
||||
- `Removed` for now removed features.
|
||||
- `Fixed` for any bug fixes.
|
||||
- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
|
||||
|
||||
### Releasing
|
||||
|
||||
When a new release is made, the "Unreleased" section should be moved to a new section with the release version and the current date. Then a new "Unreleased" section is made at the top of the file with the categories listed above.
|
||||
|
||||
## Contributor Responsibilities
|
||||
|
||||
|
||||
34
WORKSPACE
34
WORKSPACE
@@ -101,9 +101,9 @@ http_archive(
|
||||
|
||||
http_archive(
|
||||
name = "aspect_bazel_lib",
|
||||
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
|
||||
strip_prefix = "bazel-lib-2.5.0",
|
||||
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
|
||||
sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
|
||||
strip_prefix = "bazel-lib-2.9.3",
|
||||
url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
|
||||
)
|
||||
|
||||
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
|
||||
@@ -165,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
|
||||
oci_pull(
|
||||
name = "linux_debian11_multiarch_base", # Debian bullseye
|
||||
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
|
||||
image = "gcr.io/distroless/cc-debian11",
|
||||
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
|
||||
platforms = [
|
||||
"linux/amd64",
|
||||
"linux/arm64/v8",
|
||||
@@ -227,7 +227,7 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.5.0-alpha.3"
|
||||
consensus_spec_version = "v1.5.0-alpha.8"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -243,7 +243,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-+byv+GUOQytex5GgtjBGVoNDseJZbiBdAjEtlgCbjEo=",
|
||||
integrity = "sha256-BsGIbEyJuYrzhShGl0tHhR4lP5Qwno8R3k8a6YBR/DA=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -259,7 +259,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-JJUy/jT1h3kGQkinTuzL7gMOA1+qgmPgJXVrYuH63Cg=",
|
||||
integrity = "sha256-DkdvhPP2KiqUOpwFXQIFDCWCwsUDIC/xhTBD+TZevm0=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -275,7 +275,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-T2VM4Qd0SwgGnTjWxjOX297DqEsovO9Ueij1UEJy48Y=",
|
||||
integrity = "sha256-vkZqV0HB8A2Uc56C1Us/p5G57iaHL+zw2No93Xt6M/4=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -290,7 +290,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-OP9BCBcQ7i+93bwj7ktY8pZ5uWsGjgTe4XTp7BDhX+I=",
|
||||
integrity = "sha256-D/HPAW61lKqjoWwl7N0XvhdX+67dCEFAy8JxVzqBGtU=",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
@@ -342,6 +342,22 @@ filegroup(
|
||||
url = "https://github.com/eth-clients/holesky/archive/874c199423ccd180607320c38cbaca05d9a1573a.tar.gz", # 2024-06-18
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "sepolia_testnet",
|
||||
build_file_content = """
|
||||
filegroup(
|
||||
name = "configs",
|
||||
srcs = [
|
||||
"metadata/config.yaml",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-cY/UgpCcYEhQf7JefD65FI8tn/A+rAvKhcm2/qiVdqY=",
|
||||
strip_prefix = "sepolia-f2c219a93c4491cee3d90c18f2f8e82aed850eab",
|
||||
url = "https://github.com/eth-clients/sepolia/archive/f2c219a93c4491cee3d90c18f2f8e82aed850eab.tar.gz", # 2024-09-19
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "com_google_protobuf",
|
||||
sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
|
||||
|
||||
@@ -21,6 +21,7 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
@@ -28,7 +29,6 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -19,11 +19,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -146,7 +146,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
|
||||
|
||||
u := c.baseURL.ResolveReference(&url.URL{Path: path})
|
||||
|
||||
span.AddAttributes(trace.StringAttribute("url", u.String()),
|
||||
span.SetAttributes(trace.StringAttribute("url", u.String()),
|
||||
trace.StringAttribute("method", method))
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
|
||||
@@ -259,7 +259,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
|
||||
span.SetAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
|
||||
|
||||
if len(svr) == 0 {
|
||||
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
|
||||
@@ -278,7 +278,11 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
}
|
||||
|
||||
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
|
||||
return err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithField("num_registrations", len(svr)).Info("successfully registered validator(s) on builder")
|
||||
return nil
|
||||
}
|
||||
|
||||
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
|
||||
|
||||
@@ -93,6 +93,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
|
||||
EventType: EventConnectionError,
|
||||
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {
|
||||
|
||||
func TestEventStream(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
|
||||
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
|
||||
flusher, ok := w.(http.Flusher)
|
||||
require.Equal(t, true, ok)
|
||||
for i := 1; i <= 3; i++ {
|
||||
@@ -79,3 +79,23 @@ func TestEventStream(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventStreamRequestError(t *testing.T) {
|
||||
topics := []string{"head"}
|
||||
eventsChannel := make(chan *Event, 1)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// use valid url that will result in failed request with nil body
|
||||
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
|
||||
require.NoError(t, err)
|
||||
|
||||
// error will happen when request is made, should be received over events channel
|
||||
go stream.Subscribe(eventsChannel)
|
||||
|
||||
event := <-eventsChannel
|
||||
if event.EventType != EventConnectionError {
|
||||
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"gateway.go",
|
||||
"log.go",
|
||||
"modifiers.go",
|
||||
"options.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/gateway",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//validator:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//api/server/middleware:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//connectivity:go_default_library",
|
||||
"@org_golang_google_grpc//credentials:go_default_library",
|
||||
"@org_golang_google_grpc//credentials/insecure:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["gateway_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,212 +0,0 @@
|
||||
// Package gateway defines a grpc-gateway server that serves HTTP-JSON traffic and acts a proxy between HTTP and gRPC.
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
var _ runtime.Service = (*Gateway)(nil)
|
||||
|
||||
// PbMux serves grpc-gateway requests for selected patterns using registered protobuf handlers.
|
||||
type PbMux struct {
|
||||
Registrations []PbHandlerRegistration // Protobuf registrations to be registered in Mux.
|
||||
Patterns []string // URL patterns that will be handled by Mux.
|
||||
Mux *gwruntime.ServeMux // The router that will be used for grpc-gateway requests.
|
||||
}
|
||||
|
||||
// PbHandlerRegistration is a function that registers a protobuf handler.
|
||||
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
|
||||
|
||||
// MuxHandler is a function that implements the mux handler functionality.
|
||||
type MuxHandler func(
|
||||
h http.HandlerFunc,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
)
|
||||
|
||||
// Config parameters for setting up the gateway service.
|
||||
type config struct {
|
||||
maxCallRecvMsgSize uint64
|
||||
remoteCert string
|
||||
gatewayAddr string
|
||||
remoteAddr string
|
||||
allowedOrigins []string
|
||||
muxHandler MuxHandler
|
||||
pbHandlers []*PbMux
|
||||
router *mux.Router
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
|
||||
type Gateway struct {
|
||||
cfg *config
|
||||
conn *grpc.ClientConn
|
||||
server *http.Server
|
||||
cancel context.CancelFunc
|
||||
ctx context.Context
|
||||
startFailure error
|
||||
}
|
||||
|
||||
// New returns a new instance of the Gateway.
|
||||
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
|
||||
g := &Gateway{
|
||||
ctx: ctx,
|
||||
cfg: &config{},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(g); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if g.cfg.router == nil {
|
||||
g.cfg.router = mux.NewRouter()
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Start the gateway service.
|
||||
func (g *Gateway) Start() {
|
||||
ctx, cancel := context.WithCancel(g.ctx)
|
||||
g.cancel = cancel
|
||||
|
||||
conn, err := g.dial(ctx, "tcp", g.cfg.remoteAddr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to connect to gRPC server")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
g.conn = conn
|
||||
|
||||
for _, h := range g.cfg.pbHandlers {
|
||||
for _, r := range h.Registrations {
|
||||
if err := r(ctx, h.Mux, g.conn); err != nil {
|
||||
log.WithError(err).Error("Failed to register handler")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, p := range h.Patterns {
|
||||
g.cfg.router.PathPrefix(p).Handler(h.Mux)
|
||||
}
|
||||
}
|
||||
|
||||
corsMux := middleware.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
|
||||
|
||||
if g.cfg.muxHandler != nil {
|
||||
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
g.cfg.muxHandler(corsMux.ServeHTTP, w, r)
|
||||
})
|
||||
}
|
||||
|
||||
g.server = &http.Server{
|
||||
Addr: g.cfg.gatewayAddr,
|
||||
Handler: corsMux,
|
||||
ReadHeaderTimeout: time.Second,
|
||||
}
|
||||
|
||||
go func() {
|
||||
log.WithField("address", g.cfg.gatewayAddr).Info("Starting gRPC gateway")
|
||||
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
|
||||
log.WithError(err).Error("Failed to start gRPC gateway")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Status of grpc gateway. Returns an error if this service is unhealthy.
|
||||
func (g *Gateway) Status() error {
|
||||
if g.startFailure != nil {
|
||||
return g.startFailure
|
||||
}
|
||||
if s := g.conn.GetState(); s != connectivity.Ready {
|
||||
return fmt.Errorf("grpc server is %s", s)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the gateway with a graceful shutdown.
|
||||
func (g *Gateway) Stop() error {
|
||||
if g.server != nil {
|
||||
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
|
||||
defer shutdownCancel()
|
||||
if err := g.server.Shutdown(shutdownCtx); err != nil {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
log.Warn("Existing connections terminated")
|
||||
} else {
|
||||
log.WithError(err).Error("Failed to gracefully shut down server")
|
||||
}
|
||||
}
|
||||
}
|
||||
if g.cancel != nil {
|
||||
g.cancel()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dial the gRPC server.
|
||||
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
|
||||
switch network {
|
||||
case "tcp":
|
||||
return g.dialTCP(ctx, addr)
|
||||
case "unix":
|
||||
return g.dialUnix(ctx, addr)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported network type %q", network)
|
||||
}
|
||||
}
|
||||
|
||||
// dialTCP creates a client connection via TCP.
|
||||
// "addr" must be a valid TCP address with a port number.
|
||||
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
|
||||
var security grpc.DialOption
|
||||
if len(g.cfg.remoteCert) > 0 {
|
||||
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
security = grpc.WithTransportCredentials(creds)
|
||||
} else {
|
||||
// Use insecure credentials when there's no remote cert provided.
|
||||
security = grpc.WithTransportCredentials(insecure.NewCredentials())
|
||||
}
|
||||
opts := []grpc.DialOption{
|
||||
security,
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
}
|
||||
|
||||
// dialUnix creates a client connection via a unix domain socket.
|
||||
// "addr" must be a valid path to the socket.
|
||||
func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn, error) {
|
||||
d := func(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.DialTimeout("unix", addr, timeout)
|
||||
}
|
||||
f := func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
return d(addr, time.Until(deadline))
|
||||
}
|
||||
return d(addr, 0)
|
||||
}
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(f),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
}
|
||||
@@ -1,107 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func TestGateway_Customized(t *testing.T) {
|
||||
r := mux.NewRouter()
|
||||
cert := "cert"
|
||||
origins := []string{"origin"}
|
||||
size := uint64(100)
|
||||
|
||||
opts := []Option{
|
||||
WithRouter(r),
|
||||
WithRemoteCert(cert),
|
||||
WithAllowedOrigins(origins),
|
||||
WithMaxCallRecvMsgSize(size),
|
||||
WithMuxHandler(func(
|
||||
_ http.HandlerFunc,
|
||||
_ http.ResponseWriter,
|
||||
_ *http.Request,
|
||||
) {
|
||||
}),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, r, g.cfg.router)
|
||||
assert.Equal(t, cert, g.cfg.remoteCert)
|
||||
require.Equal(t, 1, len(g.cfg.allowedOrigins))
|
||||
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
|
||||
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
|
||||
}
|
||||
|
||||
func TestGateway_StartStop(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
|
||||
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
|
||||
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
|
||||
rpcHost := ctx.String(flags.RPCHost.Name)
|
||||
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
|
||||
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
|
||||
|
||||
opts := []Option{
|
||||
WithGatewayAddr(gatewayAddress),
|
||||
WithRemoteAddr(selfAddress),
|
||||
WithMuxHandler(func(
|
||||
_ http.HandlerFunc,
|
||||
_ http.ResponseWriter,
|
||||
_ *http.Request,
|
||||
) {
|
||||
}),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
g.Start()
|
||||
go func() {
|
||||
require.LogsContain(t, hook, "Starting gRPC gateway")
|
||||
require.LogsDoNotContain(t, hook, "Starting API middleware")
|
||||
}()
|
||||
err = g.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
|
||||
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
|
||||
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
|
||||
rpcHost := ctx.String(flags.RPCHost.Name)
|
||||
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
|
||||
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
|
||||
|
||||
opts := []Option{
|
||||
WithGatewayAddr(gatewayAddress),
|
||||
WithRemoteAddr(selfAddress),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
writer := httptest.NewRecorder()
|
||||
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "gateway")
|
||||
@@ -1,30 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
|
||||
md, ok := gwruntime.ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
// set http status code
|
||||
if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
|
||||
code, err := strconv.Atoi(vals[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// delete the headers to not expose any grpc-metadata in http response
|
||||
delete(md.HeaderMD, "x-http-code")
|
||||
delete(w.Header(), "Grpc-Metadata-X-Http-Code")
|
||||
w.WriteHeader(code)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
)
|
||||
|
||||
type Option func(g *Gateway) error
|
||||
|
||||
func WithPbHandlers(handlers []*PbMux) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.pbHandlers = handlers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithMuxHandler(m MuxHandler) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.muxHandler = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithGatewayAddr(addr string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.gatewayAddr = addr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithRemoteAddr(addr string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.remoteAddr = addr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRouter allows adding a custom mux router to the gateway.
|
||||
func WithRouter(r *mux.Router) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.router = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
|
||||
func WithAllowedOrigins(origins []string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.allowedOrigins = origins
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRemoteCert allows adding a custom certificate to the gateway,
|
||||
func WithRemoteCert(cert string) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.remoteCert = cert
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
|
||||
func WithMaxCallRecvMsgSize(size uint64) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.maxCallRecvMsgSize = size
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout allows changing the timeout value for API calls.
|
||||
func WithTimeout(seconds uint64) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.timeout = time.Second * time.Duration(seconds)
|
||||
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -22,9 +22,7 @@ go_test(
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,8 +2,6 @@ package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -81,16 +79,3 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
// AppendCustomErrorHeader sets a CustomErrorMetadataKey gRPC header on the passed in context,
|
||||
// using the passed in error data as the header's value. The data is serialized as JSON.
|
||||
func AppendCustomErrorHeader(ctx context.Context, errorData interface{}) error {
|
||||
j, err := json.Marshal(errorData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not marshal error data into JSON: %w", err)
|
||||
}
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs(CustomErrorMetadataKey, string(j))); err != nil {
|
||||
return fmt.Errorf("could not set custom error header: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,15 +2,11 @@ package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
@@ -62,17 +58,3 @@ func TestAppendHeaders(t *testing.T) {
|
||||
assert.Equal(t, "value=1", md.Get("first")[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestAppendCustomErrorHeader(t *testing.T) {
|
||||
stream := &runtime.ServerTransportStream{}
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
|
||||
data := &customErrorData{Message: "foo"}
|
||||
require.NoError(t, AppendCustomErrorHeader(ctx, data))
|
||||
// The stream used in test setup sets the metadata key in lowercase.
|
||||
value, ok := stream.Header()[strings.ToLower(CustomErrorMetadataKey)]
|
||||
require.Equal(t, true, ok, "Failed to retrieve custom error metadata value")
|
||||
expected, err := json.Marshal(data)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(expected), value[0])
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
package api
|
||||
|
||||
import "net/http"
|
||||
|
||||
const (
|
||||
VersionHeader = "Eth-Consensus-Version"
|
||||
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
|
||||
@@ -10,3 +12,9 @@ const (
|
||||
EventStreamMediaType = "text/event-stream"
|
||||
KeepAlive = "keep-alive"
|
||||
)
|
||||
|
||||
// SetSSEHeaders sets the headers needed for a server-sent event response.
|
||||
func SetSSEHeaders(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", EventStreamMediaType)
|
||||
w.Header().Set("Connection", KeepAlive)
|
||||
}
|
||||
|
||||
31
api/server/httprest/BUILD.bazel
Normal file
31
api/server/httprest/BUILD.bazel
Normal file
@@ -0,0 +1,31 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"log.go",
|
||||
"options.go",
|
||||
"server.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/httprest",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/server/middleware:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["server_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
5
api/server/httprest/log.go
Normal file
5
api/server/httprest/log.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package httprest
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "httprest")
|
||||
44
api/server/httprest/options.go
Normal file
44
api/server/httprest/options.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package httprest
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"net/http"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
|
||||
)
|
||||
|
||||
// Option is a http rest server functional parameter type.
|
||||
type Option func(g *Server) error
|
||||
|
||||
// WithMiddlewares sets the list of middlewares to be applied on routes.
|
||||
func WithMiddlewares(mw []middleware.Middleware) Option {
|
||||
return func(g *Server) error {
|
||||
g.cfg.middlewares = mw
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithHTTPAddr sets the full address ( host and port ) of the server.
|
||||
func WithHTTPAddr(addr string) Option {
|
||||
return func(g *Server) error {
|
||||
g.cfg.httpAddr = addr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRouter sets the internal router of the server, this is required.
|
||||
func WithRouter(r *http.ServeMux) Option {
|
||||
return func(g *Server) error {
|
||||
g.cfg.router = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout allows changing the timeout value for API calls.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return func(g *Server) error {
|
||||
g.cfg.timeout = duration
|
||||
return nil
|
||||
}
|
||||
}
|
||||
101
api/server/httprest/server.go
Normal file
101
api/server/httprest/server.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package httprest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime"
|
||||
)
|
||||
|
||||
var _ runtime.Service = (*Server)(nil)
|
||||
|
||||
// Config parameters for setting up the http-rest service.
|
||||
type config struct {
|
||||
httpAddr string
|
||||
middlewares []middleware.Middleware
|
||||
router http.Handler
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Server serves HTTP traffic.
|
||||
type Server struct {
|
||||
cfg *config
|
||||
server *http.Server
|
||||
cancel context.CancelFunc
|
||||
ctx context.Context
|
||||
startFailure error
|
||||
}
|
||||
|
||||
// New returns a new instance of the Server.
|
||||
func New(ctx context.Context, opts ...Option) (*Server, error) {
|
||||
g := &Server{
|
||||
ctx: ctx,
|
||||
cfg: &config{},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(g); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if g.cfg.router == nil {
|
||||
return nil, errors.New("router option not configured")
|
||||
}
|
||||
var handler http.Handler
|
||||
defaultReadHeaderTimeout := time.Second
|
||||
handler = middleware.MiddlewareChain(g.cfg.router, g.cfg.middlewares)
|
||||
if g.cfg.timeout > 0*time.Second {
|
||||
defaultReadHeaderTimeout = g.cfg.timeout
|
||||
handler = http.TimeoutHandler(handler, g.cfg.timeout, "request timed out")
|
||||
}
|
||||
g.server = &http.Server{
|
||||
Addr: g.cfg.httpAddr,
|
||||
Handler: handler,
|
||||
ReadHeaderTimeout: defaultReadHeaderTimeout,
|
||||
}
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Start the http rest service.
|
||||
func (g *Server) Start() {
|
||||
g.ctx, g.cancel = context.WithCancel(g.ctx)
|
||||
|
||||
go func() {
|
||||
log.WithField("address", g.cfg.httpAddr).Info("Starting HTTP server")
|
||||
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
|
||||
log.WithError(err).Error("Failed to start HTTP server")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Status of the HTTP server. Returns an error if this service is unhealthy.
|
||||
func (g *Server) Status() error {
|
||||
if g.startFailure != nil {
|
||||
return g.startFailure
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the HTTP server with a graceful shutdown.
|
||||
func (g *Server) Stop() error {
|
||||
if g.server != nil {
|
||||
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
|
||||
defer shutdownCancel()
|
||||
if err := g.server.Shutdown(shutdownCtx); err != nil {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
log.Warn("Existing connections terminated")
|
||||
} else {
|
||||
log.WithError(err).Error("Failed to gracefully shut down server")
|
||||
}
|
||||
}
|
||||
}
|
||||
if g.cancel != nil {
|
||||
g.cancel()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
71
api/server/httprest/server_test.go
Normal file
71
api/server/httprest/server_test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package httprest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func TestServer_StartStop(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
|
||||
port := ctx.Int(flags.HTTPServerPort.Name)
|
||||
portStr := fmt.Sprintf("%d", port) // Convert port to string
|
||||
host := ctx.String(flags.HTTPServerHost.Name)
|
||||
address := net.JoinHostPort(host, portStr)
|
||||
handler := http.NewServeMux()
|
||||
opts := []Option{
|
||||
WithHTTPAddr(address),
|
||||
WithRouter(handler),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
g.Start()
|
||||
go func() {
|
||||
require.LogsContain(t, hook, "Starting HTTP server")
|
||||
require.LogsDoNotContain(t, hook, "Starting API middleware")
|
||||
}()
|
||||
err = g.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
|
||||
handler := http.NewServeMux()
|
||||
port := ctx.Int(flags.HTTPServerPort.Name)
|
||||
portStr := fmt.Sprintf("%d", port) // Convert port to string
|
||||
host := ctx.String(flags.HTTPServerHost.Name)
|
||||
address := net.JoinHostPort(host, portStr)
|
||||
|
||||
opts := []Option{
|
||||
WithHTTPAddr(address),
|
||||
WithRouter(handler),
|
||||
}
|
||||
|
||||
g, err := New(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
writer := httptest.NewRecorder()
|
||||
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
}
|
||||
@@ -8,10 +8,7 @@ go_library(
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_rs_cors//:go_default_library",
|
||||
],
|
||||
deps = ["@com_github_rs_cors//:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
||||
@@ -5,10 +5,11 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
type Middleware func(http.Handler) http.Handler
|
||||
|
||||
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -21,7 +22,7 @@ func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
}
|
||||
|
||||
// CorsHandler sets the cors settings on api endpoints
|
||||
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
|
||||
func CorsHandler(allowOrigins []string) Middleware {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: allowOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
|
||||
@@ -34,7 +35,7 @@ func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
|
||||
}
|
||||
|
||||
// ContentTypeHandler checks request for the appropriate media types otherwise returning a http.StatusUnsupportedMediaType error
|
||||
func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
|
||||
func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// skip the GET request
|
||||
@@ -67,7 +68,7 @@ func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
|
||||
}
|
||||
|
||||
// AcceptHeaderHandler checks if the client's response preference is handled
|
||||
func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
|
||||
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
@@ -110,3 +111,15 @@ func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
|
||||
if len(mw) < 1 {
|
||||
return h
|
||||
}
|
||||
|
||||
wrapped := h
|
||||
for i := len(mw) - 1; i >= 0; i-- {
|
||||
wrapped = mw[i](wrapped)
|
||||
}
|
||||
return wrapped
|
||||
}
|
||||
|
||||
@@ -5,7 +5,9 @@ go_library(
|
||||
srcs = [
|
||||
"block.go",
|
||||
"conversions.go",
|
||||
"conversions_blob.go",
|
||||
"conversions_block.go",
|
||||
"conversions_lightclient.go",
|
||||
"conversions_state.go",
|
||||
"endpoints_beacon.go",
|
||||
"endpoints_blob.go",
|
||||
@@ -33,6 +35,9 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
|
||||
@@ -317,6 +317,96 @@ type BlindedBeaconBlockBodyDeneb struct {
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockContentsElectra struct {
|
||||
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
Blobs []string `json:"blobs"`
|
||||
}
|
||||
|
||||
type BeaconBlockContentsElectra struct {
|
||||
Block *BeaconBlockElectra `json:"block"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
Blobs []string `json:"blobs"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockElectra struct {
|
||||
Message *BeaconBlockElectra `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
|
||||
|
||||
func (s *SignedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockElectra) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockElectra struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
Body *BeaconBlockBodyElectra `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyElectra struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockElectra struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
Body *BlindedBeaconBlockBodyElectra `json:"body"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockElectra struct {
|
||||
Message *BlindedBeaconBlockElectra `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockElectra{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockElectra) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyElectra struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeaderContainer struct {
|
||||
Header *SignedBeaconBlockHeader `json:"header"`
|
||||
Root string `json:"root"`
|
||||
@@ -426,6 +516,8 @@ type ExecutionPayloadDeneb struct {
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadElectra = ExecutionPayloadDeneb
|
||||
|
||||
type ExecutionPayloadHeaderDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
@@ -445,3 +537,11 @@ type ExecutionPayloadHeaderDeneb struct {
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderElectra = ExecutionPayloadHeaderDeneb
|
||||
|
||||
type ExecutionRequests struct {
|
||||
Deposits []*DepositRequest `json:"deposits"`
|
||||
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
|
||||
Consolidations []*ConsolidationRequest `json:"consolidations"`
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -340,6 +341,42 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
|
||||
return &eth.SignedAggregateAttestationAndProofElectra{
|
||||
Message: msg,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttestationAndProofElectra, error) {
|
||||
aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
agg, err := a.Aggregate.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
}
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
return &eth.AggregateAttestationAndProofElectra{
|
||||
AggregatorIndex: primitives.ValidatorIndex(aggIndex),
|
||||
Aggregate: agg,
|
||||
SelectionProof: proof,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
|
||||
aggBits, err := hexutil.Decode(a.AggregationBits)
|
||||
if err != nil {
|
||||
@@ -369,6 +406,41 @@ func AttFromConsensus(a *eth.Attestation) *Attestation {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
|
||||
aggBits, err := hexutil.Decode(a.AggregationBits)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregationBits")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
committeeBits, err := hexutil.Decode(a.CommitteeBits)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "CommitteeBits")
|
||||
}
|
||||
|
||||
return &eth.AttestationElectra{
|
||||
AggregationBits: aggBits,
|
||||
Data: data,
|
||||
Signature: sig,
|
||||
CommitteeBits: committeeBits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
|
||||
return &AttestationElectra{
|
||||
AggregationBits: hexutil.Encode(a.AggregationBits),
|
||||
Data: AttDataFromConsensus(a.Data),
|
||||
Signature: hexutil.Encode(a.Signature),
|
||||
CommitteeBits: hexutil.Encode(a.CommitteeBits),
|
||||
}
|
||||
}
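A hedged round-trip sketch for the two helpers above, assuming encoding/json is available in scope; the handler name is illustrative:

// attestationElectraRoundTrip decodes an Electra attestation from API JSON,
// converts it to the consensus type, and maps it back to the API struct.
func attestationElectraRoundTrip(body []byte) (*AttestationElectra, error) {
    var apiAtt AttestationElectra
    if err := json.Unmarshal(body, &apiAtt); err != nil {
        return nil, err
    }
    consensusAtt, err := apiAtt.ToConsensus()
    if err != nil {
        return nil, err
    }
    return AttElectraFromConsensus(consensusAtt), nil
}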
|
||||
|
||||
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
|
||||
slot, err := strconv.ParseUint(a.Slot, 10, 64)
|
||||
if err != nil {
|
||||
@@ -623,6 +695,18 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
|
||||
return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
|
||||
}
|
||||
|
||||
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
|
||||
att1, err := s.Attestation1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation1")
|
||||
}
|
||||
att2, err := s.Attestation2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation2")
|
||||
}
|
||||
return &eth.AttesterSlashingElectra{Attestation_1: att1, Attestation_2: att2}, nil
|
||||
}
|
||||
|
||||
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
indices := make([]uint64, len(a.AttestingIndices))
|
||||
var err error
|
||||
@@ -648,6 +732,31 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
|
||||
indices := make([]uint64, len(a.AttestingIndices))
|
||||
var err error
|
||||
for i, ix := range a.AttestingIndices {
|
||||
indices[i], err = strconv.ParseUint(ix, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
|
||||
}
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
|
||||
return &eth.IndexedAttestationElectra{
|
||||
AttestingIndices: indices,
|
||||
Data: data,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func WithdrawalsFromConsensus(ws []*enginev1.Withdrawal) []*Withdrawal {
|
||||
result := make([]*Withdrawal, len(ws))
|
||||
for i, w := range ws {
|
||||
@@ -665,6 +774,126 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
|
||||
}
|
||||
}
|
||||
|
||||
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
|
||||
result := make([]*WithdrawalRequest, len(ws))
|
||||
for i, w := range ws {
|
||||
result[i] = WithdrawalRequestFromConsensus(w)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
|
||||
return &WithdrawalRequest{
|
||||
SourceAddress: hexutil.Encode(w.SourceAddress),
|
||||
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
|
||||
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ValidatorPubkey")
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
return &enginev1.WithdrawalRequest{
|
||||
SourceAddress: src,
|
||||
ValidatorPubkey: pubkey,
|
||||
Amount: amount,
|
||||
}, nil
|
||||
}
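A short sketch of the intended round trip for withdrawal requests, using only the two helpers shown above; req is assumed to be an already populated consensus object:

// withdrawalRequestRoundTrip converts consensus -> API struct -> consensus again.
func withdrawalRequestRoundTrip(req *enginev1.WithdrawalRequest) (*enginev1.WithdrawalRequest, error) {
    apiReq := WithdrawalRequestFromConsensus(req)
    return apiReq.ToConsensus()
}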
|
||||
|
||||
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
|
||||
result := make([]*ConsolidationRequest, len(cs))
|
||||
for i, c := range cs {
|
||||
result[i] = ConsolidationRequestFromConsensus(c)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
|
||||
return &ConsolidationRequest{
|
||||
SourceAddress: hexutil.Encode(c.SourceAddress),
|
||||
SourcePubkey: hexutil.Encode(c.SourcePubkey),
|
||||
TargetPubkey: hexutil.Encode(c.TargetPubkey),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
|
||||
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourcePubkey")
|
||||
}
|
||||
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "TargetPubkey")
|
||||
}
|
||||
return &enginev1.ConsolidationRequest{
|
||||
SourceAddress: srcAddress,
|
||||
SourcePubkey: srcPubkey,
|
||||
TargetPubkey: targetPubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
|
||||
result := make([]*DepositRequest, len(ds))
|
||||
for i, d := range ds {
|
||||
result[i] = DepositRequestFromConsensus(d)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
|
||||
return &DepositRequest{
|
||||
Pubkey: hexutil.Encode(d.Pubkey),
|
||||
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: hexutil.Encode(d.Signature),
|
||||
Index: fmt.Sprintf("%d", d.Index),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Pubkey")
|
||||
}
|
||||
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
|
||||
}
|
||||
amount, err := strconv.ParseUint(d.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
index, err := strconv.ParseUint(d.Index, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Index")
|
||||
}
|
||||
return &enginev1.DepositRequest{
|
||||
Pubkey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
Amount: amount,
|
||||
Signature: sig,
|
||||
Index: index,
|
||||
}, nil
|
||||
}
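The three request conversions above line up with the fields of the ExecutionRequests struct added earlier in this diff. A hedged sketch of assembling that container; the function name is hypothetical:

// executionRequestsFromConsensus builds the API-side ExecutionRequests container
// from the per-type consensus slices using the helpers defined above.
func executionRequestsFromConsensus(
    deposits []*enginev1.DepositRequest,
    withdrawals []*enginev1.WithdrawalRequest,
    consolidations []*enginev1.ConsolidationRequest,
) *ExecutionRequests {
    return &ExecutionRequests{
        Deposits:       DepositRequestsFromConsensus(deposits),
        Withdrawals:    WithdrawalRequestsFromConsensus(withdrawals),
        Consolidations: ConsolidationRequestsFromConsensus(consolidations),
    }
}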
|
||||
|
||||
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
@@ -930,6 +1159,138 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
|
||||
}
|
||||
}
|
||||
|
||||
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
|
||||
for i, s := range src {
|
||||
if s == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
|
||||
}
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
|
||||
}
|
||||
|
||||
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
}
|
||||
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
|
||||
}
|
||||
a1AttestingIndices := make([]uint64, len(s.Attestation1.AttestingIndices))
|
||||
for j, ix := range s.Attestation1.AttestingIndices {
|
||||
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices[%d]", i, j))
|
||||
}
|
||||
a1AttestingIndices[j] = attestingIndex
|
||||
}
|
||||
a1Data, err := s.Attestation1.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
}
|
||||
a2Sig, err := bytesutil.DecodeHexWithLength(s.Attestation2.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
|
||||
}
|
||||
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
|
||||
}
|
||||
a2AttestingIndices := make([]uint64, len(s.Attestation2.AttestingIndices))
|
||||
for j, ix := range s.Attestation2.AttestingIndices {
|
||||
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices[%d]", i, j))
|
||||
}
|
||||
a2AttestingIndices[j] = attestingIndex
|
||||
}
|
||||
a2Data, err := s.Attestation2.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Data", i))
|
||||
}
|
||||
attesterSlashings[i] = &eth.AttesterSlashingElectra{
|
||||
Attestation_1: &eth.IndexedAttestationElectra{
|
||||
AttestingIndices: a1AttestingIndices,
|
||||
Data: a1Data,
|
||||
Signature: a1Sig,
|
||||
},
|
||||
Attestation_2: &eth.IndexedAttestationElectra{
|
||||
AttestingIndices: a2AttestingIndices,
|
||||
Data: a2Data,
|
||||
Signature: a2Sig,
|
||||
},
|
||||
}
|
||||
}
|
||||
return attesterSlashings, nil
|
||||
}
|
||||
|
||||
func AttesterSlashingsElectraFromConsensus(src []*eth.AttesterSlashingElectra) []*AttesterSlashingElectra {
|
||||
attesterSlashings := make([]*AttesterSlashingElectra, len(src))
|
||||
for i, s := range src {
|
||||
attesterSlashings[i] = AttesterSlashingElectraFromConsensus(s)
|
||||
}
|
||||
return attesterSlashings
|
||||
}
|
||||
|
||||
func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *AttesterSlashingElectra {
|
||||
a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
|
||||
for j, ix := range src.Attestation_1.AttestingIndices {
|
||||
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
|
||||
}
|
||||
a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
|
||||
for j, ix := range src.Attestation_2.AttestingIndices {
|
||||
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
|
||||
}
|
||||
return &AttesterSlashingElectra{
|
||||
Attestation1: &IndexedAttestationElectra{
|
||||
AttestingIndices: a1AttestingIndices,
|
||||
Data: &AttestationData{
|
||||
Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
|
||||
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
|
||||
BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
|
||||
Source: &Checkpoint{
|
||||
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
|
||||
Root: hexutil.Encode(src.Attestation_1.Data.Source.Root),
|
||||
},
|
||||
Target: &Checkpoint{
|
||||
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
|
||||
Root: hexutil.Encode(src.Attestation_1.Data.Target.Root),
|
||||
},
|
||||
},
|
||||
Signature: hexutil.Encode(src.Attestation_1.Signature),
|
||||
},
|
||||
Attestation2: &IndexedAttestationElectra{
|
||||
AttestingIndices: a2AttestingIndices,
|
||||
Data: &AttestationData{
|
||||
Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
|
||||
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
|
||||
BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
|
||||
Source: &Checkpoint{
|
||||
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
|
||||
Root: hexutil.Encode(src.Attestation_2.Data.Source.Root),
|
||||
},
|
||||
Target: &Checkpoint{
|
||||
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
|
||||
Root: hexutil.Encode(src.Attestation_2.Data.Target.Root),
|
||||
},
|
||||
},
|
||||
Signature: hexutil.Encode(src.Attestation_2.Signature),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
@@ -957,6 +1318,33 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
|
||||
return atts
|
||||
}
|
||||
|
||||
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 8)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
atts := make([]*eth.AttestationElectra, len(src))
|
||||
for i, a := range src {
|
||||
atts[i], err = a.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
}
|
||||
return atts, nil
|
||||
}
|
||||
|
||||
func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElectra {
|
||||
atts := make([]*AttestationElectra, len(src))
|
||||
for i, a := range src {
|
||||
atts[i] = AttElectraFromConsensus(a)
|
||||
}
|
||||
return atts
|
||||
}
|
||||
|
||||
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
@@ -1087,3 +1475,74 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
|
||||
ExecutionBlockHeight: fmt.Sprintf("%d", ds.ExecutionDepth),
|
||||
}
|
||||
}
|
||||
|
||||
func PendingDepositsFromConsensus(ds []*eth.PendingDeposit) []*PendingDeposit {
|
||||
deposits := make([]*PendingDeposit, len(ds))
|
||||
for i, d := range ds {
|
||||
deposits[i] = &PendingDeposit{
|
||||
Pubkey: hexutil.Encode(d.PublicKey),
|
||||
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: hexutil.Encode(d.Signature),
|
||||
Slot: fmt.Sprintf("%d", d.Slot),
|
||||
}
|
||||
}
|
||||
return deposits
|
||||
}
|
||||
|
||||
func PendingPartialWithdrawalsFromConsensus(ws []*eth.PendingPartialWithdrawal) []*PendingPartialWithdrawal {
|
||||
withdrawals := make([]*PendingPartialWithdrawal, len(ws))
|
||||
for i, w := range ws {
|
||||
withdrawals[i] = &PendingPartialWithdrawal{
|
||||
Index: fmt.Sprintf("%d", w.Index),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
WithdrawableEpoch: fmt.Sprintf("%d", w.WithdrawableEpoch),
|
||||
}
|
||||
}
|
||||
return withdrawals
|
||||
}
|
||||
|
||||
func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*PendingConsolidation {
|
||||
consolidations := make([]*PendingConsolidation, len(cs))
|
||||
for i, c := range cs {
|
||||
consolidations[i] = &PendingConsolidation{
|
||||
SourceIndex: fmt.Sprintf("%d", c.SourceIndex),
|
||||
TargetIndex: fmt.Sprintf("%d", c.TargetIndex),
|
||||
}
|
||||
}
|
||||
return consolidations
|
||||
}
|
||||
|
||||
func HeadEventFromV1(event *ethv1.EventHead) *HeadEvent {
|
||||
return &HeadEvent{
|
||||
Slot: fmt.Sprintf("%d", event.Slot),
|
||||
Block: hexutil.Encode(event.Block),
|
||||
State: hexutil.Encode(event.State),
|
||||
EpochTransition: event.EpochTransition,
|
||||
ExecutionOptimistic: event.ExecutionOptimistic,
|
||||
PreviousDutyDependentRoot: hexutil.Encode(event.PreviousDutyDependentRoot),
|
||||
CurrentDutyDependentRoot: hexutil.Encode(event.CurrentDutyDependentRoot),
|
||||
}
|
||||
}
|
||||
|
||||
func FinalizedCheckpointEventFromV1(event *ethv1.EventFinalizedCheckpoint) *FinalizedCheckpointEvent {
|
||||
return &FinalizedCheckpointEvent{
|
||||
Block: hexutil.Encode(event.Block),
|
||||
State: hexutil.Encode(event.State),
|
||||
Epoch: fmt.Sprintf("%d", event.Epoch),
|
||||
ExecutionOptimistic: event.ExecutionOptimistic,
|
||||
}
|
||||
}
|
||||
|
||||
func EventChainReorgFromV1(event *ethv1.EventChainReorg) *ChainReorgEvent {
|
||||
return &ChainReorgEvent{
|
||||
Slot: fmt.Sprintf("%d", event.Slot),
|
||||
Depth: fmt.Sprintf("%d", event.Depth),
|
||||
OldHeadBlock: hexutil.Encode(event.OldHeadBlock),
|
||||
NewHeadBlock: hexutil.Encode(event.NewHeadBlock),
|
||||
OldHeadState: hexutil.Encode(event.OldHeadState),
|
||||
NewHeadState: hexutil.Encode(event.NewHeadState),
|
||||
Epoch: fmt.Sprintf("%d", event.Epoch),
|
||||
ExecutionOptimistic: event.ExecutionOptimistic,
|
||||
}
|
||||
}
|
||||
|
||||
api/server/structs/conversions_blob.go (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
package structs

import (
    "strconv"

    "github.com/prysmaticlabs/prysm/v5/api/server"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func (sc *Sidecar) ToConsensus() (*eth.BlobSidecar, error) {
    if sc == nil {
        return nil, errNilValue
    }

    index, err := strconv.ParseUint(sc.Index, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Index")
    }

    blob, err := bytesutil.DecodeHexWithLength(sc.Blob, 131072)
    if err != nil {
        return nil, server.NewDecodeError(err, "Blob")
    }

    kzgCommitment, err := bytesutil.DecodeHexWithLength(sc.KzgCommitment, 48)
    if err != nil {
        return nil, server.NewDecodeError(err, "KzgCommitment")
    }

    kzgProof, err := bytesutil.DecodeHexWithLength(sc.KzgProof, 48)
    if err != nil {
        return nil, server.NewDecodeError(err, "KzgProof")
    }

    header, err := sc.SignedBeaconBlockHeader.ToConsensus()
    if err != nil {
        return nil, server.NewDecodeError(err, "SignedBeaconBlockHeader")
    }

    // decode the commitment inclusion proof
    var commitmentInclusionProof [][]byte
    for _, proof := range sc.CommitmentInclusionProof {
        proofBytes, err := bytesutil.DecodeHexWithLength(proof, 32)
        if err != nil {
            return nil, server.NewDecodeError(err, "CommitmentInclusionProof")
        }
        commitmentInclusionProof = append(commitmentInclusionProof, proofBytes)
    }

    bsc := &eth.BlobSidecar{
        Index:                    index,
        Blob:                     blob,
        KzgCommitment:            kzgCommitment,
        KzgProof:                 kzgProof,
        SignedBlockHeader:        header,
        CommitmentInclusionProof: commitmentInclusionProof,
    }

    return bsc, nil
}
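A hedged sketch of how a handler might use Sidecar.ToConsensus when accepting the PublishBlobsRequest type defined later in this diff; everything except the names shown in the diff is illustrative:

// sidecarsToConsensus converts every sidecar in a publish request to its consensus form.
func sidecarsToConsensus(req *PublishBlobsRequest) ([]*eth.BlobSidecar, error) {
    if req == nil || req.BlobSidecars == nil {
        return nil, errNilValue
    }
    out := make([]*eth.BlobSidecar, 0, len(req.BlobSidecars.Sidecars))
    for _, sc := range req.BlobSidecars.Sidecars {
        converted, err := sc.ToConsensus()
        if err != nil {
            return nil, err
        }
        out = append(out, converted)
    }
    return out, nil
}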
|
||||
File diff suppressed because it is too large
api/server/structs/conversions_lightclient.go (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/migration"
|
||||
)
|
||||
|
||||
func LightClientUpdateFromConsensus(update *v2.LightClientUpdate) (*LightClientUpdate, error) {
|
||||
attestedHeader, err := lightClientHeaderContainerToJSON(update.AttestedHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal attested light client header")
|
||||
}
|
||||
finalizedHeader, err := lightClientHeaderContainerToJSON(update.FinalizedHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal finalized light client header")
|
||||
}
|
||||
|
||||
return &LightClientUpdate{
|
||||
AttestedHeader: attestedHeader,
|
||||
NextSyncCommittee: SyncCommitteeFromConsensus(migration.V2SyncCommitteeToV1Alpha1(update.NextSyncCommittee)),
|
||||
NextSyncCommitteeBranch: branchToJSON(update.NextSyncCommitteeBranch),
|
||||
FinalizedHeader: finalizedHeader,
|
||||
FinalityBranch: branchToJSON(update.FinalityBranch),
|
||||
SyncAggregate: syncAggregateToJSON(update.SyncAggregate),
|
||||
SignatureSlot: strconv.FormatUint(uint64(update.SignatureSlot), 10),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func LightClientFinalityUpdateFromConsensus(update *v2.LightClientFinalityUpdate) (*LightClientFinalityUpdate, error) {
|
||||
attestedHeader, err := lightClientHeaderContainerToJSON(update.AttestedHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal attested light client header")
|
||||
}
|
||||
finalizedHeader, err := lightClientHeaderContainerToJSON(update.FinalizedHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal finalized light client header")
|
||||
}
|
||||
|
||||
return &LightClientFinalityUpdate{
|
||||
AttestedHeader: attestedHeader,
|
||||
FinalizedHeader: finalizedHeader,
|
||||
FinalityBranch: branchToJSON(update.FinalityBranch),
|
||||
SyncAggregate: syncAggregateToJSON(update.SyncAggregate),
|
||||
SignatureSlot: strconv.FormatUint(uint64(update.SignatureSlot), 10),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func LightClientOptimisticUpdateFromConsensus(update *v2.LightClientOptimisticUpdate) (*LightClientOptimisticUpdate, error) {
|
||||
attestedHeader, err := lightClientHeaderContainerToJSON(update.AttestedHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal attested light client header")
|
||||
}
|
||||
|
||||
return &LightClientOptimisticUpdate{
|
||||
AttestedHeader: attestedHeader,
|
||||
SyncAggregate: syncAggregateToJSON(update.SyncAggregate),
|
||||
SignatureSlot: strconv.FormatUint(uint64(update.SignatureSlot), 10),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func branchToJSON(branchBytes [][]byte) []string {
|
||||
if branchBytes == nil {
|
||||
return nil
|
||||
}
|
||||
branch := make([]string, len(branchBytes))
|
||||
for i, root := range branchBytes {
|
||||
branch[i] = hexutil.Encode(root)
|
||||
}
|
||||
return branch
|
||||
}
|
||||
|
||||
func syncAggregateToJSON(input *v1.SyncAggregate) *SyncAggregate {
|
||||
return &SyncAggregate{
|
||||
SyncCommitteeBits: hexutil.Encode(input.SyncCommitteeBits),
|
||||
SyncCommitteeSignature: hexutil.Encode(input.SyncCommitteeSignature),
|
||||
}
|
||||
}
|
||||
|
||||
func lightClientHeaderContainerToJSON(container *v2.LightClientHeaderContainer) (json.RawMessage, error) {
|
||||
// The container can be nil, e.g. when there is no finalized header.
|
||||
if container == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
beacon, err := container.GetBeacon()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get beacon block header")
|
||||
}
|
||||
|
||||
var header any
|
||||
|
||||
switch t := (container.Header).(type) {
|
||||
case *v2.LightClientHeaderContainer_HeaderAltair:
|
||||
header = &LightClientHeader{Beacon: BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(beacon))}
|
||||
case *v2.LightClientHeaderContainer_HeaderCapella:
|
||||
execution, err := ExecutionPayloadHeaderCapellaFromConsensus(t.HeaderCapella.Execution)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header = &LightClientHeaderCapella{
|
||||
Beacon: BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(beacon)),
|
||||
Execution: execution,
|
||||
ExecutionBranch: branchToJSON(t.HeaderCapella.ExecutionBranch),
|
||||
}
|
||||
case *v2.LightClientHeaderContainer_HeaderDeneb:
|
||||
execution, err := ExecutionPayloadHeaderDenebFromConsensus(t.HeaderDeneb.Execution)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header = &LightClientHeaderDeneb{
|
||||
Beacon: BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(beacon)),
|
||||
Execution: execution,
|
||||
ExecutionBranch: branchToJSON(t.HeaderDeneb.ExecutionBranch),
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported header type %T", t)
|
||||
}
|
||||
|
||||
return json.Marshal(header)
|
||||
}
|
||||
@@ -593,3 +593,185 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
|
||||
HistoricalSummaries: hs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
|
||||
srcBr := st.BlockRoots()
|
||||
br := make([]string, len(srcBr))
|
||||
for i, r := range srcBr {
|
||||
br[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcSr := st.StateRoots()
|
||||
sr := make([]string, len(srcSr))
|
||||
for i, r := range srcSr {
|
||||
sr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcHr, err := st.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hr := make([]string, len(srcHr))
|
||||
for i, r := range srcHr {
|
||||
hr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcVotes := st.Eth1DataVotes()
|
||||
votes := make([]*Eth1Data, len(srcVotes))
|
||||
for i, e := range srcVotes {
|
||||
votes[i] = Eth1DataFromConsensus(e)
|
||||
}
|
||||
srcVals := st.Validators()
|
||||
vals := make([]*Validator, len(srcVals))
|
||||
for i, v := range srcVals {
|
||||
vals[i] = ValidatorFromConsensus(v)
|
||||
}
|
||||
srcBals := st.Balances()
|
||||
bals := make([]string, len(srcBals))
|
||||
for i, b := range srcBals {
|
||||
bals[i] = fmt.Sprintf("%d", b)
|
||||
}
|
||||
srcRm := st.RandaoMixes()
|
||||
rm := make([]string, len(srcRm))
|
||||
for i, m := range srcRm {
|
||||
rm[i] = hexutil.Encode(m)
|
||||
}
|
||||
srcSlashings := st.Slashings()
|
||||
slashings := make([]string, len(srcSlashings))
|
||||
for i, s := range srcSlashings {
|
||||
slashings[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
srcPrevPart, err := st.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevPart := make([]string, len(srcPrevPart))
|
||||
for i, p := range srcPrevPart {
|
||||
prevPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcCurrPart, err := st.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currPart := make([]string, len(srcCurrPart))
|
||||
for i, p := range srcCurrPart {
|
||||
currPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcIs, err := st.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is := make([]string, len(srcIs))
|
||||
for i, s := range srcIs {
|
||||
is[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
currSc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSc, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
execData, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
|
||||
if !ok {
|
||||
return nil, errPayloadHeaderNotFound
|
||||
}
|
||||
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcHs, err := st.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hs := make([]*HistoricalSummary, len(srcHs))
|
||||
for i, s := range srcHs {
|
||||
hs[i] = HistoricalSummaryFromConsensus(s)
|
||||
}
|
||||
nwi, err := st.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwvi, err := st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drsi, err := st.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbtc, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ebtc, err := st.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eee, err := st.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cbtc, err := st.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ece, err := st.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pbd, err := st.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ppw, err := st.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BeaconStateElectra{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: hr,
|
||||
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
|
||||
Eth1DataVotes: votes,
|
||||
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: slashings,
|
||||
PreviousEpochParticipation: prevPart,
|
||||
CurrentEpochParticipation: currPart,
|
||||
JustificationBits: hexutil.Encode(st.JustificationBits()),
|
||||
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
|
||||
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
|
||||
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
|
||||
InactivityScores: is,
|
||||
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
|
||||
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
|
||||
LatestExecutionPayloadHeader: payload,
|
||||
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
|
||||
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
|
||||
HistoricalSummaries: hs,
|
||||
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
|
||||
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
|
||||
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
|
||||
EarliestExitEpoch: fmt.Sprintf("%d", eee),
|
||||
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
|
||||
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
}, nil
|
||||
}
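A minimal usage sketch for the function above, assuming encoding/json is imported; it simply renders the converted state as API JSON:

// electraStateJSON converts a consensus Electra state to the API struct and marshals it.
func electraStateJSON(st beaconState.BeaconState) ([]byte, error) {
    apiState, err := BeaconStateElectraFromConsensus(st)
    if err != nil {
        return nil, err
    }
    return json.Marshal(apiState)
}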
|
||||
|
||||
@@ -21,11 +21,12 @@ type GetCommitteesResponse struct {
|
||||
}
|
||||
|
||||
type ListAttestationsResponse struct {
|
||||
Data []*Attestation `json:"data"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
type SubmitAttestationsRequest struct {
|
||||
Data []*Attestation `json:"data"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
type ListVoluntaryExitsResponse struct {
|
||||
@@ -133,6 +134,13 @@ type GetBlockAttestationsResponse struct {
|
||||
Data []*Attestation `json:"data"`
|
||||
}
|
||||
|
||||
type GetBlockAttestationsV2Response struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data json.RawMessage `json:"data"` // Accepts both `Attestation` and `AttestationElectra` types
|
||||
}
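Because Data is a json.RawMessage, a consumer has to branch on Version before unmarshalling. A hedged sketch, assuming encoding/json and strings are in scope and that the Electra fork is reported as the string "electra" (the exact version constant is an assumption):

// decodeBlockAttestations picks the attestation struct type based on the reported fork version.
func decodeBlockAttestations(resp *GetBlockAttestationsV2Response) (interface{}, error) {
    if strings.EqualFold(resp.Version, "electra") {
        var atts []*AttestationElectra
        if err := json.Unmarshal(resp.Data, &atts); err != nil {
            return nil, err
        }
        return atts, nil
    }
    var atts []*Attestation
    if err := json.Unmarshal(resp.Data, &atts); err != nil {
        return nil, err
    }
    return atts, nil
}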
|
||||
|
||||
type GetStateRootResponse struct {
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
@@ -169,7 +177,8 @@ type BLSToExecutionChangesPoolResponse struct {
|
||||
}
|
||||
|
||||
type GetAttesterSlashingsResponse struct {
|
||||
Data []*AttesterSlashing `json:"data"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Data json.RawMessage `json:"data"` // Accepts both `[]*AttesterSlashing` and `[]*AttesterSlashingElectra` types
|
||||
}
|
||||
|
||||
type GetProposerSlashingsResponse struct {
|
||||
|
||||
@@ -12,3 +12,12 @@ type Sidecar struct {
|
||||
KzgProof string `json:"kzg_proof"`
|
||||
CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
|
||||
}
|
||||
|
||||
type BlobSidecars struct {
|
||||
Sidecars []*Sidecar `json:"sidecars"`
|
||||
}
|
||||
|
||||
type PublishBlobsRequest struct {
|
||||
BlobSidecars *BlobSidecars `json:"blob_sidecars"`
|
||||
BlockRoot string `json:"block_root"`
|
||||
}
|
||||
|
||||
@@ -96,21 +96,7 @@ type LightClientFinalityUpdateEvent struct {
|
||||
Data *LightClientFinalityUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type LightClientFinalityUpdate struct {
|
||||
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
|
||||
FinalizedHeader *BeaconBlockHeader `json:"finalized_header"`
|
||||
FinalityBranch []string `json:"finality_branch"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
SignatureSlot string `json:"signature_slot"`
|
||||
}
|
||||
|
||||
type LightClientOptimisticUpdateEvent struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientOptimisticUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type LightClientOptimisticUpdate struct {
|
||||
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
SignatureSlot string `json:"signature_slot"`
|
||||
}
|
||||
|
||||
@@ -1,31 +1,73 @@
|
||||
package structs
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
type LightClientHeader struct {
|
||||
Beacon *BeaconBlockHeader `json:"beacon"`
|
||||
}
|
||||
|
||||
type LightClientHeaderCapella struct {
|
||||
Beacon *BeaconBlockHeader `json:"beacon"`
|
||||
Execution *ExecutionPayloadHeaderCapella `json:"execution"`
|
||||
ExecutionBranch []string `json:"execution_branch"`
|
||||
}
|
||||
|
||||
type LightClientHeaderDeneb struct {
|
||||
Beacon *BeaconBlockHeader `json:"beacon"`
|
||||
Execution *ExecutionPayloadHeaderDeneb `json:"execution"`
|
||||
ExecutionBranch []string `json:"execution_branch"`
|
||||
}
|
||||
|
||||
type LightClientBootstrap struct {
|
||||
Header json.RawMessage `json:"header"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
|
||||
}
|
||||
|
||||
type LightClientUpdate struct {
|
||||
AttestedHeader json.RawMessage `json:"attested_header"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
|
||||
FinalizedHeader json.RawMessage `json:"finalized_header,omitempty"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
|
||||
FinalityBranch []string `json:"finality_branch,omitempty"`
|
||||
SignatureSlot string `json:"signature_slot"`
|
||||
}
|
||||
|
||||
type LightClientFinalityUpdate struct {
|
||||
AttestedHeader json.RawMessage `json:"attested_header"`
|
||||
FinalizedHeader json.RawMessage `json:"finalized_header"`
|
||||
FinalityBranch []string `json:"finality_branch"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
SignatureSlot string `json:"signature_slot"`
|
||||
}
|
||||
|
||||
type LightClientOptimisticUpdate struct {
|
||||
AttestedHeader json.RawMessage `json:"attested_header"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
SignatureSlot string `json:"signature_slot"`
|
||||
}
|
||||
|
||||
type LightClientBootstrapResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientBootstrap `json:"data"`
|
||||
}
|
||||
|
||||
type LightClientBootstrap struct {
|
||||
Header *BeaconBlockHeader `json:"header"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
|
||||
}
|
||||
|
||||
type LightClientUpdate struct {
|
||||
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
|
||||
FinalizedHeader *BeaconBlockHeader `json:"finalized_header,omitempty"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
|
||||
FinalityBranch []string `json:"finality_branch,omitempty"`
|
||||
SignatureSlot string `json:"signature_slot"`
|
||||
}
|
||||
|
||||
type LightClientUpdateWithVersion struct {
|
||||
type LightClientUpdateResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type LightClientUpdatesByRangeResponse struct {
|
||||
Updates []*LightClientUpdateWithVersion `json:"updates"`
|
||||
type LightClientFinalityUpdateResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientFinalityUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type LightClientOptimisticUpdateResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientOptimisticUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type LightClientUpdatesByRangeResponse struct {
|
||||
Updates []*LightClientUpdateResponse `json:"updates"`
|
||||
}
|
||||
|
||||
@@ -7,7 +7,8 @@ import (
|
||||
)
|
||||
|
||||
type AggregateAttestationResponse struct {
|
||||
Data *Attestation `json:"data"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
type SubmitContributionAndProofsRequest struct {
|
||||
@@ -15,7 +16,7 @@ type SubmitContributionAndProofsRequest struct {
|
||||
}
|
||||
|
||||
type SubmitAggregateAndProofsRequest struct {
|
||||
Data []*SignedAggregateAttestationAndProof `json:"data"`
|
||||
Data []json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
type SubmitSyncCommitteeSubscriptionsRequest struct {
|
||||
|
||||
@@ -29,6 +29,13 @@ type Attestation struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type AttestationElectra struct {
|
||||
AggregationBits string `json:"aggregation_bits"`
|
||||
Data *AttestationData `json:"data"`
|
||||
Signature string `json:"signature"`
|
||||
CommitteeBits string `json:"committee_bits"`
|
||||
}
|
||||
|
||||
type AttestationData struct {
|
||||
Slot string `json:"slot"`
|
||||
CommitteeIndex string `json:"index"`
|
||||
@@ -78,6 +85,17 @@ type AggregateAttestationAndProof struct {
|
||||
SelectionProof string `json:"selection_proof"`
|
||||
}
|
||||
|
||||
type SignedAggregateAttestationAndProofElectra struct {
|
||||
Message *AggregateAttestationAndProofElectra `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type AggregateAttestationAndProofElectra struct {
|
||||
AggregatorIndex string `json:"aggregator_index"`
|
||||
Aggregate *AttestationElectra `json:"aggregate"`
|
||||
SelectionProof string `json:"selection_proof"`
|
||||
}
|
||||
|
||||
type SyncCommitteeSubscription struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
SyncCommitteeIndices []string `json:"sync_committee_indices"`
|
||||
@@ -178,6 +196,11 @@ type AttesterSlashing struct {
|
||||
Attestation2 *IndexedAttestation `json:"attestation_2"`
|
||||
}
|
||||
|
||||
type AttesterSlashingElectra struct {
|
||||
Attestation1 *IndexedAttestationElectra `json:"attestation_1"`
|
||||
Attestation2 *IndexedAttestationElectra `json:"attestation_2"`
|
||||
}
|
||||
|
||||
type Deposit struct {
|
||||
Proof []string `json:"proof"`
|
||||
Data *DepositData `json:"data"`
|
||||
@@ -196,6 +219,12 @@ type IndexedAttestation struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type IndexedAttestationElectra struct {
|
||||
AttestingIndices []string `json:"attesting_indices"`
|
||||
Data *AttestationData `json:"data"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type SyncAggregate struct {
|
||||
SyncCommitteeBits string `json:"sync_committee_bits"`
|
||||
SyncCommitteeSignature string `json:"sync_committee_signature"`
|
||||
@@ -207,3 +236,42 @@ type Withdrawal struct {
|
||||
ExecutionAddress string `json:"address"`
|
||||
Amount string `json:"amount"`
|
||||
}
|
||||
|
||||
type DepositRequest struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
WithdrawalCredentials string `json:"withdrawal_credentials"`
|
||||
Amount string `json:"amount"`
|
||||
Signature string `json:"signature"`
|
||||
Index string `json:"index"`
|
||||
}
|
||||
|
||||
type WithdrawalRequest struct {
|
||||
SourceAddress string `json:"source_address"`
|
||||
ValidatorPubkey string `json:"validator_pubkey"`
|
||||
Amount string `json:"amount"`
|
||||
}
|
||||
|
||||
type ConsolidationRequest struct {
|
||||
SourceAddress string `json:"source_address"`
|
||||
SourcePubkey string `json:"source_pubkey"`
|
||||
TargetPubkey string `json:"target_pubkey"`
|
||||
}
|
||||
|
||||
type PendingDeposit struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
WithdrawalCredentials string `json:"withdrawal_credentials"`
|
||||
Amount string `json:"amount"`
|
||||
Signature string `json:"signature"`
|
||||
Slot string `json:"slot"`
|
||||
}
|
||||
|
||||
type PendingPartialWithdrawal struct {
|
||||
Index string `json:"index"`
|
||||
Amount string `json:"amount"`
|
||||
WithdrawableEpoch string `json:"withdrawable_epoch"`
|
||||
}
|
||||
|
||||
type PendingConsolidation struct {
|
||||
SourceIndex string `json:"source_index"`
|
||||
TargetIndex string `json:"target_index"`
|
||||
}
|
||||
|
||||
@@ -140,3 +140,43 @@ type BeaconStateDeneb struct {
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
}
|
||||
|
||||
type BeaconStateElectra struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
}
|
||||
|
||||
@@ -4,26 +4,25 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"feed.go",
|
||||
"interface.go",
|
||||
"subscription.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/async/event",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//time/mclock:go_default_library"],
|
||||
deps = [
|
||||
"//time/mclock:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//event:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"example_feed_test.go",
|
||||
"example_scope_test.go",
|
||||
"example_subscription_test.go",
|
||||
"feed_test.go",
|
||||
"subscription_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
)
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package event_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
)
|
||||
|
||||
func ExampleFeed_acknowledgedEvents() {
|
||||
// This example shows how the return value of Send can be used for request/reply
|
||||
// interaction between event consumers and producers.
|
||||
var feed event.Feed
|
||||
type ackedEvent struct {
|
||||
i int
|
||||
ack chan<- struct{}
|
||||
}
|
||||
|
||||
// Consumers wait for events on the feed and acknowledge processing.
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
for i := 0; i < 3; i++ {
|
||||
ch := make(chan ackedEvent, 100)
|
||||
sub := feed.Subscribe(ch)
|
||||
go func() {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case ev := <-ch:
|
||||
fmt.Println(ev.i) // "process" the event
|
||||
ev.ack <- struct{}{}
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// The producer sends values of type ackedEvent with increasing values of i.
|
||||
// It waits for all consumers to acknowledge before sending the next event.
|
||||
for i := 0; i < 3; i++ {
|
||||
acksignal := make(chan struct{})
|
||||
n := feed.Send(ackedEvent{i, acksignal})
|
||||
for ack := 0; ack < n; ack++ {
|
||||
<-acksignal
|
||||
}
|
||||
}
|
||||
// Output:
|
||||
// 0
|
||||
// 0
|
||||
// 0
|
||||
// 1
|
||||
// 1
|
||||
// 1
|
||||
// 2
|
||||
// 2
|
||||
// 2
|
||||
}
|
||||
@@ -14,241 +14,12 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package event contains an event feed implementation for process communication.
|
||||
package event
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
geth_event "github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
|
||||
var errBadChannel = errors.New("event: Subscribe argument does not have sendable channel type")
|
||||
|
||||
// Feed implements one-to-many subscriptions where the carrier of events is a channel.
|
||||
// Values sent to a Feed are delivered to all subscribed channels simultaneously.
|
||||
//
|
||||
// Feeds can only be used with a single type. The type is determined by the first Send or
|
||||
// Subscribe operation. Subsequent calls to these methods panic if the type does not
|
||||
// match.
|
||||
//
|
||||
// The zero value is ready to use.
|
||||
type Feed struct {
|
||||
once sync.Once // ensures that init only runs once
|
||||
sendLock chan struct{} // sendLock has a one-element buffer and is empty when held. It protects sendCases.
|
||||
removeSub chan interface{} // interrupts Send
|
||||
sendCases caseList // the active set of select cases used by Send
|
||||
|
||||
// The inbox holds newly subscribed channels until they are added to sendCases.
|
||||
mu sync.Mutex
|
||||
inbox caseList
|
||||
etype reflect.Type
|
||||
}
|
||||
|
||||
// This is the index of the first actual subscription channel in sendCases.
|
||||
// sendCases[0] is a SelectRecv case for the removeSub channel.
|
||||
const firstSubSendCase = 1
|
||||
|
||||
type feedTypeError struct {
|
||||
got, want reflect.Type
|
||||
op string
|
||||
}
|
||||
|
||||
func (e feedTypeError) Error() string {
|
||||
return "event: wrong type in " + e.op + " got " + e.got.String() + ", want " + e.want.String()
|
||||
}
|
||||
|
||||
func (f *Feed) init() {
|
||||
f.removeSub = make(chan interface{})
|
||||
f.sendLock = make(chan struct{}, 1)
|
||||
f.sendLock <- struct{}{}
|
||||
f.sendCases = caseList{{Chan: reflect.ValueOf(f.removeSub), Dir: reflect.SelectRecv}}
|
||||
}
|
||||
|
||||
// Subscribe adds a channel to the feed. Future sends will be delivered on the channel
|
||||
// until the subscription is canceled. All channels added must have the same element type.
|
||||
//
|
||||
// The channel should have ample buffer space to avoid blocking other subscribers.
|
||||
// Slow subscribers are not dropped.
|
||||
func (f *Feed) Subscribe(channel interface{}) Subscription {
|
||||
f.once.Do(f.init)
|
||||
|
||||
chanval := reflect.ValueOf(channel)
|
||||
chantyp := chanval.Type()
|
||||
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.SendDir == 0 {
|
||||
panic(errBadChannel)
|
||||
}
|
||||
sub := &feedSub{feed: f, channel: chanval, err: make(chan error, 1)}
|
||||
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if !f.typecheck(chantyp.Elem()) {
|
||||
panic(feedTypeError{op: "Subscribe", got: chantyp, want: reflect.ChanOf(reflect.SendDir, f.etype)})
|
||||
}
|
||||
// Add the select case to the inbox.
|
||||
// The next Send will add it to f.sendCases.
|
||||
cas := reflect.SelectCase{Dir: reflect.SelectSend, Chan: chanval}
|
||||
f.inbox = append(f.inbox, cas)
|
||||
return sub
|
||||
}
|
||||
|
||||
// note: callers must hold f.mu
|
||||
func (f *Feed) typecheck(typ reflect.Type) bool {
|
||||
if f.etype == nil {
|
||||
f.etype = typ
|
||||
return true
|
||||
}
|
||||
// In the event the feed's type is an actual interface, we
|
||||
// perform an interface conformance check here.
|
||||
if f.etype.Kind() == reflect.Interface && typ.Implements(f.etype) {
|
||||
return true
|
||||
}
|
||||
return f.etype == typ
|
||||
}
|
||||
|
||||
func (f *Feed) remove(sub *feedSub) {
|
||||
// Delete from inbox first, which covers channels
|
||||
// that have not been added to f.sendCases yet.
|
||||
ch := sub.channel.Interface()
|
||||
f.mu.Lock()
|
||||
index := f.inbox.find(ch)
|
||||
if index != -1 {
|
||||
f.inbox = f.inbox.delete(index)
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
f.mu.Unlock()
|
||||
|
||||
select {
|
||||
case f.removeSub <- ch:
|
||||
// Send will remove the channel from f.sendCases.
|
||||
case <-f.sendLock:
|
||||
// No Send is in progress, delete the channel now that we have the send lock.
|
||||
f.sendCases = f.sendCases.delete(f.sendCases.find(ch))
|
||||
f.sendLock <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Send delivers to all subscribed channels simultaneously.
|
||||
// It returns the number of subscribers that the value was sent to.
|
||||
func (f *Feed) Send(value interface{}) (nsent int) {
|
||||
rvalue := reflect.ValueOf(value)
|
||||
|
||||
f.once.Do(f.init)
|
||||
<-f.sendLock
|
||||
|
||||
// Add new cases from the inbox after taking the send lock.
|
||||
f.mu.Lock()
|
||||
f.sendCases = append(f.sendCases, f.inbox...)
|
||||
f.inbox = nil
|
||||
|
||||
if !f.typecheck(rvalue.Type()) {
|
||||
f.sendLock <- struct{}{}
|
||||
f.mu.Unlock()
|
||||
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
|
||||
}
|
||||
f.mu.Unlock()
|
||||
|
||||
// Set the sent value on all channels.
|
||||
for i := firstSubSendCase; i < len(f.sendCases); i++ {
|
||||
f.sendCases[i].Send = rvalue
|
||||
}
|
||||
|
||||
// Send until all channels except removeSub have been chosen. 'cases' tracks a prefix
|
||||
// of sendCases. When a send succeeds, the corresponding case moves to the end of
|
||||
// 'cases' and it shrinks by one element.
|
||||
cases := f.sendCases
|
||||
for {
|
||||
// Fast path: try sending without blocking before adding to the select set.
|
||||
// This should usually succeed if subscribers are fast enough and have free
|
||||
// buffer space.
|
||||
for i := firstSubSendCase; i < len(cases); i++ {
|
||||
if cases[i].Chan.TrySend(rvalue) {
|
||||
nsent++
|
||||
cases = cases.deactivate(i)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if len(cases) == firstSubSendCase {
|
||||
break
|
||||
}
|
||||
// Select on all the receivers, waiting for them to unblock.
|
||||
chosen, recv, _ := reflect.Select(cases)
|
||||
if chosen == 0 /* <-f.removeSub */ {
|
||||
index := f.sendCases.find(recv.Interface())
|
||||
f.sendCases = f.sendCases.delete(index)
|
||||
if index >= 0 && index < len(cases) {
|
||||
// Shrink 'cases' too because the removed case was still active.
|
||||
cases = f.sendCases[:len(cases)-1]
|
||||
}
|
||||
} else {
|
||||
cases = cases.deactivate(chosen)
|
||||
nsent++
|
||||
}
|
||||
}
|
||||
|
||||
// Forget about the sent value and hand off the send lock.
|
||||
for i := firstSubSendCase; i < len(f.sendCases); i++ {
|
||||
f.sendCases[i].Send = reflect.Value{}
|
||||
}
|
||||
f.sendLock <- struct{}{}
|
||||
return nsent
|
||||
}
|
||||
|
||||
type feedSub struct {
|
||||
feed *Feed
|
||||
channel reflect.Value
|
||||
errOnce sync.Once
|
||||
err chan error
|
||||
}
|
||||
|
||||
// Unsubscribe remove feed subscription.
|
||||
func (sub *feedSub) Unsubscribe() {
|
||||
sub.errOnce.Do(func() {
|
||||
sub.feed.remove(sub)
|
||||
close(sub.err)
|
||||
})
|
||||
}
|
||||
|
||||
// Err returns error channel.
|
||||
func (sub *feedSub) Err() <-chan error {
|
||||
return sub.err
|
||||
}
|
||||
|
||||
type caseList []reflect.SelectCase
|
||||
|
||||
// find returns the index of a case containing the given channel.
|
||||
func (cs caseList) find(channel interface{}) int {
|
||||
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
|
||||
return selectCase.Chan.Interface() == channel
|
||||
})
|
||||
}
|
||||
|
||||
// delete removes the given case from cs.
|
||||
func (cs caseList) delete(index int) caseList {
|
||||
return append(cs[:index], cs[index+1:]...)
|
||||
}
|
||||
|
||||
// deactivate moves the case at index into the non-accessible portion of the cs slice.
|
||||
func (cs caseList) deactivate(index int) caseList {
|
||||
last := len(cs) - 1
|
||||
cs[index], cs[last] = cs[last], cs[index]
|
||||
return cs[:last]
|
||||
}
|
||||
|
||||
// func (cs caseList) String() string {
|
||||
// s := "["
|
||||
// for i, cas := range cs {
|
||||
// if i != 0 {
|
||||
// s += ", "
|
||||
// }
|
||||
// switch cas.Dir {
|
||||
// case reflect.SelectSend:
|
||||
// s += fmt.Sprintf("%v<-", cas.Chan.Interface())
|
||||
// case reflect.SelectRecv:
|
||||
// s += fmt.Sprintf("<-%v", cas.Chan.Interface())
|
||||
// }
|
||||
// }
|
||||
// return s + "]"
|
||||
// }
|
||||
// Feed is a re-export of the go-ethereum event feed.
|
||||
type Feed = geth_event.Feed
|
||||
type Subscription = geth_event.Subscription
|
||||
|
||||
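Callers keep the same API after the switch to the go-ethereum re-export. Below is a minimal usage sketch of the zero-value Feed, assuming the async/event import path used elsewhere in this repository; it is illustrative only, not part of the diff.

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/async/event"
)

func main() {
    var feed event.Feed       // the zero value is ready to use
    ch := make(chan int, 1)   // buffered so Send does not block on a slow reader
    sub := feed.Subscribe(ch) // first Subscribe fixes the feed's element type to int
    defer sub.Unsubscribe()

    n := feed.Send(42) // delivered to every subscribed channel
    fmt.Println(n, <-ch)
}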
@@ -1,509 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
)
|
||||
|
||||
func TestFeedPanics(t *testing.T) {
|
||||
{
|
||||
var f Feed
|
||||
f.Send(2)
|
||||
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
// Validate it doesn't deadlock.
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
ch := make(chan int)
|
||||
f.Subscribe(ch)
|
||||
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
f.Send(2)
|
||||
want := feedTypeError{op: "Subscribe", got: reflect.TypeOf(make(chan uint64)), want: reflect.TypeOf(make(chan<- int))}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Subscribe(make(chan uint64)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(make(<-chan int)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(0) }))
|
||||
}
|
||||
}
|
||||
|
||||
func checkPanic(want error, fn func()) (err error) {
|
||||
defer func() {
|
||||
panicResult := recover()
|
||||
if panicResult == nil {
|
||||
err = fmt.Errorf("didn't panic")
|
||||
} else if !reflect.DeepEqual(panicResult, want) {
|
||||
err = fmt.Errorf("panicked with wrong error: got %q, want %q", panicResult, want)
|
||||
}
|
||||
}()
|
||||
fn()
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestFeed(t *testing.T) {
|
||||
var feed Feed
|
||||
var done, subscribed sync.WaitGroup
|
||||
subscriber := func(i int) {
|
||||
defer done.Done()
|
||||
|
||||
subchan := make(chan int)
|
||||
sub := feed.Subscribe(subchan)
|
||||
timeout := time.NewTimer(2 * time.Second)
|
||||
subscribed.Done()
|
||||
|
||||
select {
|
||||
case v := <-subchan:
|
||||
if v != 1 {
|
||||
t.Errorf("%d: received value %d, want 1", i, v)
|
||||
}
|
||||
case <-timeout.C:
|
||||
t.Errorf("%d: receive timeout", i)
|
||||
}
|
||||
|
||||
sub.Unsubscribe()
|
||||
select {
|
||||
case _, ok := <-sub.Err():
|
||||
if ok {
|
||||
t.Errorf("%d: error channel not closed after unsubscribe", i)
|
||||
}
|
||||
case <-timeout.C:
|
||||
t.Errorf("%d: unsubscribe timeout", i)
|
||||
}
|
||||
}
|
||||
|
||||
const n = 1000
|
||||
done.Add(n)
|
||||
subscribed.Add(n)
|
||||
for i := 0; i < n; i++ {
|
||||
go subscriber(i)
|
||||
}
|
||||
subscribed.Wait()
|
||||
if nsent := feed.Send(1); nsent != n {
|
||||
t.Errorf("first send delivered %d times, want %d", nsent, n)
|
||||
}
|
||||
if nsent := feed.Send(2); nsent != 0 {
|
||||
t.Errorf("second send delivered %d times, want 0", nsent)
|
||||
}
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeedSubscribeSameChannel(t *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
done sync.WaitGroup
|
||||
ch = make(chan int)
|
||||
sub1 = feed.Subscribe(ch)
|
||||
sub2 = feed.Subscribe(ch)
|
||||
_ = feed.Subscribe(ch)
|
||||
)
|
||||
expectSends := func(value, n int) {
|
||||
if nsent := feed.Send(value); nsent != n {
|
||||
t.Errorf("send delivered %d times, want %d", nsent, n)
|
||||
}
|
||||
done.Done()
|
||||
}
|
||||
expectRecv := func(wantValue, n int) {
|
||||
for i := 0; i < n; i++ {
|
||||
if v := <-ch; v != wantValue {
|
||||
t.Errorf("received %d, want %d", v, wantValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done.Add(1)
|
||||
go expectSends(1, 3)
|
||||
expectRecv(1, 3)
|
||||
done.Wait()
|
||||
|
||||
sub1.Unsubscribe()
|
||||
|
||||
done.Add(1)
|
||||
go expectSends(2, 2)
|
||||
expectRecv(2, 2)
|
||||
done.Wait()
|
||||
|
||||
sub2.Unsubscribe()
|
||||
|
||||
done.Add(1)
|
||||
go expectSends(3, 1)
|
||||
expectRecv(3, 1)
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeedSubscribeBlockedPost(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
nsends = 2000
|
||||
ch1 = make(chan int)
|
||||
ch2 = make(chan int)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
defer wg.Wait()
|
||||
|
||||
feed.Subscribe(ch1)
|
||||
wg.Add(nsends)
|
||||
for i := 0; i < nsends; i++ {
|
||||
go func() {
|
||||
feed.Send(99)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
sub2 := feed.Subscribe(ch2)
|
||||
defer sub2.Unsubscribe()
|
||||
|
||||
// We're done when ch1 has received N times.
|
||||
// The number of receives on ch2 depends on scheduling.
|
||||
for i := 0; i < nsends; {
|
||||
select {
|
||||
case <-ch1:
|
||||
i++
|
||||
case <-ch2:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFeedUnsubscribeBlockedPost(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
nsends = 200
|
||||
chans = make([]chan int, 2000)
|
||||
subs = make([]Subscription, len(chans))
|
||||
bchan = make(chan int)
|
||||
bsub = feed.Subscribe(bchan)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
for i := range chans {
|
||||
chans[i] = make(chan int, nsends)
|
||||
}
|
||||
|
||||
// Queue up some Sends. None of these can make progress while bchan isn't read.
|
||||
wg.Add(nsends)
|
||||
for i := 0; i < nsends; i++ {
|
||||
go func() {
|
||||
feed.Send(99)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
// Subscribe the other channels.
|
||||
for i, ch := range chans {
|
||||
subs[i] = feed.Subscribe(ch)
|
||||
}
|
||||
// Unsubscribe them again.
|
||||
for _, sub := range subs {
|
||||
sub.Unsubscribe()
|
||||
}
|
||||
// Unblock the Sends.
|
||||
bsub.Unsubscribe()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Checks that unsubscribing a channel during Send works even if that
|
||||
// channel has already been sent on.
|
||||
func TestFeedUnsubscribeSentChan(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
ch1 = make(chan int)
|
||||
ch2 = make(chan int)
|
||||
sub1 = feed.Subscribe(ch1)
|
||||
sub2 = feed.Subscribe(ch2)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
defer sub2.Unsubscribe()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
feed.Send(0)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Wait for the value on ch1.
|
||||
<-ch1
|
||||
// Unsubscribe ch1, removing it from the send cases.
|
||||
sub1.Unsubscribe()
|
||||
|
||||
// Receive ch2, finishing Send.
|
||||
<-ch2
|
||||
wg.Wait()
|
||||
|
||||
// Send again. This should send to ch2 only, so the wait group will unblock
|
||||
// as soon as a value is received on ch2.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
feed.Send(0)
|
||||
wg.Done()
|
||||
}()
|
||||
<-ch2
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestFeedUnsubscribeFromInbox(t *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
ch1 = make(chan int)
|
||||
ch2 = make(chan int)
|
||||
sub1 = feed.Subscribe(ch1)
|
||||
sub2 = feed.Subscribe(ch1)
|
||||
sub3 = feed.Subscribe(ch2)
|
||||
)
|
||||
assert.Equal(t, 3, len(feed.inbox))
|
||||
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
|
||||
|
||||
sub1.Unsubscribe()
|
||||
sub2.Unsubscribe()
|
||||
sub3.Unsubscribe()
|
||||
assert.Equal(t, 0, len(feed.inbox), "Inbox is non-empty after unsubscribe")
|
||||
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
|
||||
}
|
||||
|
||||
func BenchmarkFeedSend1000(b *testing.B) {
|
||||
var (
|
||||
done sync.WaitGroup
|
||||
feed Feed
|
||||
nsubs = 1000
|
||||
)
|
||||
subscriber := func(ch <-chan int) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
<-ch
|
||||
}
|
||||
done.Done()
|
||||
}
|
||||
done.Add(nsubs)
|
||||
for i := 0; i < nsubs; i++ {
|
||||
ch := make(chan int, 200)
|
||||
feed.Subscribe(ch)
|
||||
go subscriber(ch)
|
||||
}
|
||||
|
||||
// The actual benchmark.
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if feed.Send(i) != nsubs {
|
||||
panic("wrong number of sends")
|
||||
}
|
||||
}
|
||||
|
||||
b.StopTimer()
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeed_Send(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
evFeed *Feed
|
||||
testSetup func(fd *Feed, t *testing.T, o interface{})
|
||||
obj interface{}
|
||||
expectPanic bool
|
||||
}{
|
||||
{
|
||||
name: "normal struct",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedWithPointer, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeedWithPointer{
|
||||
a: new(uint64),
|
||||
b: new(string),
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "un-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeedWithPointer{
|
||||
a: new(uint64),
|
||||
b: new(string),
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
{
|
||||
name: "semi-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed2{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
{
|
||||
name: "fully-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed{
|
||||
a: 0,
|
||||
b: "",
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "fully-implemented interface with additional methods",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed3{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
d: []byte{'B'},
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "concrete types implementing the same interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeed, 1)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed3{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
d: []byte{'B'},
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if !tt.expectPanic {
|
||||
t.Errorf("panic triggered when unexpected: %v", r)
|
||||
}
|
||||
} else {
|
||||
if tt.expectPanic {
|
||||
t.Error("panic not triggered when expected")
|
||||
}
|
||||
}
|
||||
}()
|
||||
tt.testSetup(tt.evFeed, t, tt.obj)
|
||||
if gotNsent := tt.evFeed.Send(tt.obj); gotNsent != 1 {
|
||||
t.Errorf("Send() = %v, want %v", gotNsent, 1)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// The following objects below are a collection of different
|
||||
// struct types to test with.
|
||||
type testFeed struct {
|
||||
a uint64
|
||||
b string
|
||||
}
|
||||
|
||||
func (testFeed) method1() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed) method2() {
|
||||
|
||||
}
|
||||
|
||||
type testFeedWithPointer struct {
|
||||
a *uint64
|
||||
b *string
|
||||
}
|
||||
|
||||
type testFeed2 struct {
|
||||
a uint64
|
||||
b string
|
||||
c []byte
|
||||
}
|
||||
|
||||
func (testFeed2) method1() {
|
||||
|
||||
}
|
||||
|
||||
type testFeed3 struct {
|
||||
a uint64
|
||||
b string
|
||||
c, d []byte
|
||||
}
|
||||
|
||||
func (testFeed3) method1() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed3) method2() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed3) method3() {
|
||||
|
||||
}
|
||||
|
||||
type testFeedIface interface {
|
||||
method1()
|
||||
method2()
|
||||
}
|
||||
8	async/event/interface.go	Normal file
@@ -0,0 +1,8 @@
package event

// SubscriberSender is an abstract representation of an *event.Feed
// to use in describing types that accept or return an *event.Feed.
type SubscriberSender interface {
    Subscribe(channel interface{}) Subscription
    Send(value interface{}) (nsent int)
}
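SubscriberSender exists so code can depend on an abstraction rather than a concrete *event.Feed. A minimal sketch of that pattern follows; the broadcaster type is hypothetical, only the interface comes from the file above.

package example

import "github.com/prysmaticlabs/prysm/v5/async/event"

// broadcaster depends only on the SubscriberSender abstraction, so a real
// *event.Feed or a test double can be injected interchangeably.
type broadcaster struct {
    feed event.SubscriberSender
}

// publish forwards v to every subscriber and reports how many received it.
func (b *broadcaster) publish(v interface{}) int {
    return b.feed.Send(v)
}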
@@ -28,25 +28,6 @@ import (
// request backoff time.
const waitQuotient = 10

// Subscription represents a stream of events. The carrier of the events is typically a
// channel, but isn't part of the interface.
//
// Subscriptions can fail while established. Failures are reported through an error
// channel. It receives a value if there is an issue with the subscription (e.g. the
// network connection delivering the events has been closed). Only one value will ever be
// sent.
//
// The error channel is closed when the subscription ends successfully (i.e. when the
// source of events is closed). It is also closed when Unsubscribe is called.
//
// The Unsubscribe method cancels the sending of events. You must call Unsubscribe in all
// cases to ensure that resources related to the subscription are released. It can be
// called any number of times.
type Subscription interface {
    Err() <-chan error // returns the error channel
    Unsubscribe()      // cancels sending of events, closing the error channel
}

// NewSubscription runs a producer function as a subscription in a new goroutine. The
// channel given to the producer is closed when Unsubscribe is called. If fn returns an
// error, it is sent on the subscription's error channel.
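A sketch of the producer pattern the comment above describes, assuming NewSubscription keeps the go-ethereum form func(quit <-chan struct{}) error; the integer producer is illustrative only.

package example

import "github.com/prysmaticlabs/prysm/v5/async/event"

// produceInts streams increasing integers until the subscriber calls
// Unsubscribe, which closes quit and lets the producer return cleanly.
func produceInts(ch chan<- int) event.Subscription {
    return event.NewSubscription(func(quit <-chan struct{}) error {
        for i := 0; ; i++ {
            select {
            case ch <- i:
            case <-quit:
                return nil
            }
        }
    })
}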
@@ -13,7 +13,6 @@ go_library(
|
||||
"head.go",
|
||||
"head_sync_committee_info.go",
|
||||
"init_sync_process_block.go",
|
||||
"lightclient.go",
|
||||
"log.go",
|
||||
"merge_ascii_art.go",
|
||||
"metrics.go",
|
||||
@@ -48,6 +47,7 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -81,10 +81,10 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
@@ -97,7 +97,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -117,7 +116,6 @@ go_test(
|
||||
"head_test.go",
|
||||
"init_sync_process_block_test.go",
|
||||
"init_test.go",
|
||||
"lightclient_test.go",
|
||||
"log_test.go",
|
||||
"metrics_test.go",
|
||||
"mock_test.go",
|
||||
@@ -174,7 +172,6 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -12,13 +12,14 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "go.opencensus.io/trace"
)

// ChainInfoFetcher defines a common interface for methods in blockchain service which
@@ -44,7 +45,7 @@ type ForkchoiceFetcher interface {
    UpdateHead(context.Context, primitives.Slot)
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
    InsertNode(context.Context, state.BeaconState, [32]byte) error
    InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
    ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
    NewSlot(context.Context, primitives.Slot) error
    ProposerBoost() [32]byte
@@ -203,7 +204,7 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
    defer s.headLock.RUnlock()

    ok := s.hasHeadState()
    span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
    span.SetAttributes(trace.BoolAttribute("cache_hit", ok))

    if ok {
        return s.headState(ctx), nil
@@ -225,7 +226,7 @@ func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconSt
    defer s.headLock.RUnlock()

    ok := s.hasHeadState()
    span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
    span.SetAttributes(trace.BoolAttribute("cache_hit", ok))

    if ok {
        return s.headStateReadOnly(ctx), nil
@@ -242,7 +243,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
    if !s.hasHeadState() {
        return []primitives.ValidatorIndex{}, nil
    }
    return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
    return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
@@ -4,6 +4,7 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -22,13 +23,6 @@ func (s *Service) GetProposerHead() [32]byte {
    return s.cfg.ForkChoiceStore.GetProposerHead()
}

// ShouldOverrideFCU returns the corresponding value from forkchoice
func (s *Service) ShouldOverrideFCU() bool {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
    s.cfg.ForkChoiceStore.Lock()
@@ -51,10 +45,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}

// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
    s.cfg.ForkChoiceStore.Lock()
    defer s.cfg.ForkChoiceStore.Unlock()
    return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
    return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}

// ForkChoiceDump returns the corresponding value from forkchoice
@@ -13,6 +13,7 @@ import (
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -38,7 +39,7 @@ func prepareForkchoiceState(
    payloadHash [32]byte,
    justified *ethpb.Checkpoint,
    finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
) (state.BeaconState, consensus_blocks.ROBlock, error) {
    blockHeader := &ethpb.BeaconBlockHeader{
        ParentRoot: parentRoot[:],
    }
@@ -59,7 +60,26 @@ func prepareForkchoiceState(

    base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
    st, err := state_native.InitializeFromProtoBellatrix(base)
    return st, blockRoot, err
    if err != nil {
        return nil, consensus_blocks.ROBlock{}, err
    }
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot:       slot,
            ParentRoot: parentRoot[:],
            Body: &ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: &enginev1.ExecutionPayload{
                    BlockHash: payloadHash[:],
                },
            },
        },
    }
    signed, err := blocks.NewSignedBeaconBlock(blk)
    if err != nil {
        return nil, consensus_blocks.ROBlock{}, err
    }
    roblock, err := consensus_blocks.NewROBlockWithRoot(signed, blockRoot)
    return st, roblock, err
}

func TestHeadRoot_Nil(t *testing.T) {
@@ -122,9 +142,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
|
||||
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: []byte{'j'}}
|
||||
ofc := ðpb.Checkpoint{Root: []byte{'f'}}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
|
||||
|
||||
@@ -316,24 +336,24 @@ func TestService_ChainHeads(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -413,12 +433,12 @@ func TestService_IsOptimistic(t *testing.T) {
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -449,12 +469,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -2,7 +2,6 @@ package blockchain

import (
    "context"
    "crypto/sha256"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
@@ -21,15 +20,13 @@ import (
    payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

const blobCommitmentVersionKZG uint8 = 0x01

var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -39,7 +36,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
    defer span.End()

    if arg.headBlock.IsNil() {
    if arg.headBlock == nil || arg.headBlock.IsNil() {
        log.Error("Head block is nil")
        return nil, nil
    }
@@ -219,17 +216,25 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
    }

    var lastValidHash []byte
    var parentRoot *common.Hash
    var versionedHashes []common.Hash
    var requests *enginev1.ExecutionRequests
    if blk.Version() >= version.Deneb {
        var versionedHashes []common.Hash
        versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
        if err != nil {
            return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
        }
        pr := common.Hash(blk.Block().ParentRoot())
        lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
    } else {
        lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
        prh := common.Hash(blk.Block().ParentRoot())
        parentRoot = &prh
    }
    if blk.Version() >= version.Electra {
        requests, err = blk.Block().Body().ExecutionRequests()
        if err != nil {
            return false, errors.Wrap(err, "could not get execution requests")
        }
    }
    lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

    switch {
    case err == nil:
        newPayloadValidNodeCount.Inc()
@@ -402,13 +407,7 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([

    versionedHashes := make([]common.Hash, len(commitments))
    for i, commitment := range commitments {
        versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
        versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
    }
    return versionedHashes, nil
}

func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
    versionedHash := sha256.Sum256(commitment)
    versionedHash[0] = blobCommitmentVersionKZG
    return versionedHash
}
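For reference, the versioned-hash construction that the hunk above moves behind primitives.ConvertKzgCommitmentToVersionedHash is small enough to restate as a standalone sketch; the helper name below is illustrative, the logic mirrors the deleted function.

package example

import (
    "crypto/sha256"

    "github.com/ethereum/go-ethereum/common"
)

const blobCommitmentVersionKZG uint8 = 0x01

// kzgToVersionedHash hashes a KZG commitment with SHA-256 and stamps the first
// byte with the blob commitment version, matching the logic shown above.
func kzgToVersionedHash(commitment []byte) common.Hash {
    h := sha256.Sum256(commitment)
    h[0] = blobCommitmentVersionKZG
    return h
}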
@@ -1135,9 +1135,14 @@ func TestComputePayloadAttribute(t *testing.T) {
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
blockRoot: [32]byte{'a'},
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
}
|
||||
fcu := &fcuConfig{
|
||||
headState: st,
|
||||
|
||||
@@ -12,9 +12,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
func (s *Service) isNewHead(r [32]byte) bool {
|
||||
|
||||
@@ -18,11 +18,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
|
||||
|
||||
@@ -1,245 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
const (
|
||||
FinalityBranchNumOfLeaves = 6
|
||||
)
|
||||
|
||||
// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
|
||||
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
|
||||
//
|
||||
// return LightClientFinalityUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// finalized_header=update.finalized_header,
|
||||
// finality_branch=update.finality_branch,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func CreateLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
|
||||
return ðpbv2.LightClientFinalityUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
|
||||
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
|
||||
//
|
||||
// return LightClientOptimisticUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func CreateLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
|
||||
return ðpbv2.LightClientOptimisticUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
|
||||
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
|
||||
attestedEpoch := slots.ToEpoch(attestedState.Slot())
|
||||
if attestedEpoch < params.BeaconConfig().AltairForkEpoch {
|
||||
return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
|
||||
}
|
||||
|
||||
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
syncAggregate, err := block.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get sync aggregate %w", err)
|
||||
}
|
||||
|
||||
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
|
||||
}
|
||||
|
||||
// assert state.slot == state.latest_block_header.slot
|
||||
if state.Slot() != state.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), state.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// assert hash_tree_root(header) == hash_tree_root(block.message)
|
||||
header := state.LatestBlockHeader()
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get state root %w", err)
|
||||
}
|
||||
header.StateRoot = stateRoot[:]
|
||||
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get header root %w", err)
|
||||
}
|
||||
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block root %w", err)
|
||||
}
|
||||
|
||||
if headerRoot != blockRoot {
|
||||
return nil, fmt.Errorf("header root %#x not equal to block root %#x", headerRoot, blockRoot)
|
||||
}
|
||||
|
||||
// assert attested_state.slot == attested_state.latest_block_header.slot
|
||||
if attestedState.Slot() != attestedState.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("attested state slot %d not equal to attested latest block header slot %d", attestedState.Slot(), attestedState.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// attested_header = attested_state.latest_block_header.copy()
|
||||
attestedHeader := attestedState.LatestBlockHeader()
|
||||
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested state root %w", err)
|
||||
}
|
||||
attestedHeader.StateRoot = attestedStateRoot[:]
|
||||
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested header root %w", err)
|
||||
}
|
||||
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
|
||||
}
|
||||
|
||||
// Return result
|
||||
attestedHeaderResult := ðpbv1.BeaconBlockHeader{
|
||||
Slot: attestedHeader.Slot,
|
||||
ProposerIndex: attestedHeader.ProposerIndex,
|
||||
ParentRoot: attestedHeader.ParentRoot,
|
||||
StateRoot: attestedHeader.StateRoot,
|
||||
BodyRoot: attestedHeader.BodyRoot,
|
||||
}
|
||||
|
||||
syncAggregateResult := ðpbv1.SyncAggregate{
|
||||
SyncCommitteeBits: syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: syncAggregate.SyncCommitteeSignature,
|
||||
}
|
||||
|
||||
result := ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: attestedHeaderResult,
|
||||
SyncAggregate: syncAggregateResult,
|
||||
SignatureSlot: block.Block().Slot(),
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientFinalityUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
|
||||
result, err := NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx,
|
||||
state,
|
||||
block,
|
||||
attestedState,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Indicate finality whenever possible
|
||||
var finalizedHeader *ethpbv1.BeaconBlockHeader
|
||||
var finalityBranch [][]byte
|
||||
|
||||
if finalizedBlock != nil && !finalizedBlock.IsNil() {
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
tempFinalizedHeader, err := finalizedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header %w", err)
|
||||
}
|
||||
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
|
||||
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header root %w", err)
|
||||
}
|
||||
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
return nil, fmt.Errorf("finalized header root %#x not equal to attested finalized checkpoint root %#x", finalizedHeaderRoot, bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root))
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
|
||||
return nil, fmt.Errorf("invalid finalized header root %v", attestedState.FinalizedCheckpoint().Root)
|
||||
}
|
||||
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
var bErr error
|
||||
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
|
||||
if bErr != nil {
|
||||
return nil, fmt.Errorf("could not get finalized root proof %w", bErr)
|
||||
}
|
||||
} else {
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
|
||||
finalityBranch = make([][]byte, FinalityBranchNumOfLeaves)
|
||||
for i := 0; i < FinalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch[i] = make([]byte, 32)
|
||||
}
|
||||
}
|
||||
|
||||
result.FinalizedHeader = finalizedHeader
|
||||
result.FinalityBranch = finalityBranch
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromFinalityUpdate(update *ethpbv2.LightClientFinalityUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromOptimisticUpdate(update *ethpbv2.LightClientOptimisticUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
@@ -1,160 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
type testlc struct {
|
||||
t *testing.T
|
||||
ctx context.Context
|
||||
state state.BeaconState
|
||||
block interfaces.ReadOnlySignedBeaconBlock
|
||||
attestedState state.BeaconState
|
||||
attestedHeader *ethpb.BeaconBlockHeader
|
||||
}
|
||||
|
||||
func newTestLc(t *testing.T) *testlc {
|
||||
return &testlc{t: t}
|
||||
}
|
||||
|
||||
func (l *testlc) setupTest() *testlc {
|
||||
ctx := context.Background()
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateCapella()
|
||||
require.NoError(l.t, err)
|
||||
err = attestedState.SetSlot(slot)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parent := util.NewBeaconBlockCapella()
|
||||
parent.Block.Slot = slot
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(l.t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(l.t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
state, err := util.NewBeaconStateCapella()
|
||||
require.NoError(l.t, err)
|
||||
err = state.SetSlot(slot)
|
||||
require.NoError(l.t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(l.t, err)
|
||||
|
||||
block := util.NewBeaconBlockCapella()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}

signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(l.t, err)

h, err := signedBlock.Header()
require.NoError(l.t, err)

err = state.SetLatestBlockHeader(h.Header)
require.NoError(l.t, err)
stateRoot, err := state.HashTreeRoot(ctx)
require.NoError(l.t, err)

// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(l.t, err)

l.state = state
l.attestedState = attestedState
l.attestedHeader = attestedHeader
l.block = signedBlock
l.ctx = ctx

return l
}

func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")

attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
require.NoError(l.t, err)
require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
}

func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
syncAggregate, err := l.block.Block().Body().SyncAggregate()
require.NoError(l.t, err)
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
}

func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
l := newTestLc(t).setupTest()

update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

l.checkSyncAggregate(update)
l.checkAttestedHeader(update)

require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
}

func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
l := newTestLc(t).setupTest()

update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

l.checkSyncAggregate(update)
l.checkAttestedHeader(update)

zeroHash := params.BeaconConfig().ZeroHash[:]
require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
require.Equal(t, FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
for _, leaf := range update.FinalityBranch {
require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
}
}
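The finality-update test above asserts a fully zeroed finalized header when no finalized checkpoint is supplied. As an illustrative sketch only (the helper name is hypothetical and not part of this changeset), the expected shape can be written as:

// emptyFinalizedHeader mirrors the assertions in the test above: a finality
// update built without a finalized checkpoint carries a zero slot, a zero
// proposer index, and zero-hash roots, alongside a finality branch made of
// FinalityBranchNumOfLeaves zero hashes.
func emptyFinalizedHeader() *v1.BeaconBlockHeader {
	zeroHash := params.BeaconConfig().ZeroHash[:]
	return &v1.BeaconBlockHeader{
		Slot:          0,
		ProposerIndex: 0,
		ParentRoot:    zeroHash,
		StateRoot:     zeroHash,
		BodyRoot:      zeroHash,
	}
}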
@@ -46,7 +46,7 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.ReadOnlyS
if err != nil {
return err
}
if payload.IsNil() {
if payload == nil || payload.IsNil() {
return errors.New("nil execution payload")
}
ok, err := canUseValidatedTerminalBlockHash(b.Block().Slot(), payload)

@@ -7,10 +7,10 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)

// OnAttestation is called whenever an attestation is received, verifies the attestation is valid and saves

@@ -32,18 +32,18 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)

cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -139,9 +139,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}

@@ -318,10 +318,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -337,10 +336,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)

@@ -25,12 +25,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

// A custom slot deadline for processing state slots in our cache.
@@ -46,8 +46,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// process the beacon block after validating the state transition function
type postBlockProcessConfig struct {
ctx context.Context
signed interfaces.ReadOnlySignedBeaconBlock
blockRoot [32]byte
roblock consensusblocks.ROBlock
headRoot [32]byte
postState state.BeaconState
isValidPayload bool
@@ -61,7 +60,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
defer span.End()
cfg.ctx = ctx
if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
if err := consensusblocks.BeaconBlockIsNil(cfg.roblock); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
@@ -73,19 +72,22 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
defer s.sendLightClientFeeds(cfg)
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.signed.Block())
defer reportAttestationInclusion(cfg.roblock.Block())

err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
if err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
// Do not use the parent context in case its deadline has already been exceeded
ctx = trace.NewContext(context.Background(), span)
s.rollbackBlock(ctx, cfg.roblock.Root())
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}

s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
if cfg.isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.roblock.Root()); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
@@ -95,8 +97,8 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
log.WithError(err).Warn("Could not update head")
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if cfg.headRoot != cfg.blockRoot {
s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
if cfg.headRoot != cfg.roblock.Root() {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
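The hunks above replace the separate signed/blockRoot fields of postBlockProcessConfig with a single consensusblocks.ROBlock, which carries a signed block together with its precomputed root. A minimal sketch of the resulting access pattern (the helper below is hypothetical and only summarizes the accessors this diff relies on):

// describeROBlock shows the two accessors the refactor leans on: the wrapped
// read-only signed block and its cached 32-byte root. Callers build the value
// with consensusblocks.NewROBlockWithRoot(wsb, root), as the tests below do.
func describeROBlock(b consensusblocks.ROBlock) (primitives.Slot, [32]byte) {
	return b.Block().Slot(), b.Root()
}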
@@ -154,7 +156,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}

// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
}

@@ -234,7 +236,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
@@ -279,7 +281,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Set their optimistic status
@@ -404,6 +406,10 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
log.Warnf("Rolling back insertion of block with root %#x", r)
if err := s.cfg.BeaconDB.DeleteBlock(ctx, r); err != nil {
log.WithError(err).Errorf("Could not delete block with block root %#x", r)
}
return errors.Wrap(err, "could not save state")
}
return nil
@@ -684,3 +690,15 @@ func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, bl
}
return err
}

// In the event of an issue processing a block, we roll back the changes made to the db and our caches
// to ensure that the node's internal state always remains consistent.
func (s *Service) rollbackBlock(ctx context.Context, blockRoot [32]byte) {
log.Warnf("Rolling back insertion of block with root %#x due to processing error", blockRoot)
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, blockRoot); err != nil {
log.WithError(err).Errorf("Could not delete state from caches with block root %#x", blockRoot)
}
if err := s.cfg.BeaconDB.DeleteBlock(ctx, blockRoot); err != nil {
log.WithError(err).Errorf("Could not delete block with block root %#x", blockRoot)
}
}
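Read together with the postBlockProcess and savePostStateInfo hunks above, the import path this changeset produces is roughly the following condensed sketch (not part of the diff; error handling trimmed, variable names follow the tests later in this changeset):

// savePostStateInfo persists the block and its post state; if the subsequent
// forkchoice insertion fails inside postBlockProcess, rollbackBlock removes
// the block and its cached state again so DB, caches, and forkchoice agree.
if err := s.savePostStateInfo(ctx, root, wsb, postState); err != nil {
	return err
}
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
if err != nil {
	return err
}
if err := s.postBlockProcess(&postBlockProcessConfig{
	ctx:            ctx,
	roblock:        roblock,
	postState:      postState,
	isValidPayload: isValidPayload,
}); err != nil {
	// On a forkchoice insertion failure the block and state have already
	// been rolled back by the service.
	return err
}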

@@ -5,6 +5,8 @@ import (
"fmt"
"time"

lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -16,15 +18,16 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/features"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

// CurrentSlot returns the current slot based on time.
@@ -40,7 +43,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if !s.inRegularSync() {
return nil
}
slot := cfg.signed.Block().Slot()
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
return nil
}
@@ -48,9 +51,9 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if cfg.roblock.Root() == cfg.headRoot {
fcuArgs.headState = cfg.postState
fcuArgs.headBlock = cfg.signed
fcuArgs.headBlock = cfg.roblock
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
@@ -94,7 +97,7 @@ func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs

// sendStateFeedOnBlock sends an event that a new block has been synced
func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.roblock.Root())
if err != nil {
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
@@ -103,9 +106,9 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: cfg.signed.Block().Slot(),
BlockRoot: cfg.blockRoot,
SignedBlock: cfg.signed,
Slot: cfg.roblock.Block().Slot(),
BlockRoot: cfg.roblock.Root(),
SignedBlock: cfg.roblock,
Verified: true,
Optimistic: optimistic,
},
@@ -115,7 +118,7 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
// sendLightClientFeeds sends the light client feeds when feature flag is enabled.
func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
if features.Get().EnableLightClient {
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to send light client optimistic update")
}

@@ -123,7 +126,7 @@ func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
finalized := s.ForkChoicer().FinalizedCheckpoint()

// LightClientFinalityUpdate needs super majority
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.roblock, finalized, cfg.postState)
}
}

@@ -160,6 +163,10 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
postState state.BeaconState) (int, error) {
// Get attested state
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return 0, errors.Wrap(err, "could not get attested block")
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return 0, errors.Wrap(err, "could not get attested state")
@@ -176,11 +183,12 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
}
}

update, err := NewLightClientFinalityUpdateFromBeaconState(
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState,
signed,
attestedState,
attestedBlock,
finalizedBlock,
)

@@ -191,7 +199,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
// Return the result
result := &ethpbv2.LightClientFinalityUpdateWithVersion{
Version: ethpbv2.Version(signed.Version()),
Data: CreateLightClientFinalityUpdate(update),
Data: update,
}

// Send event
@@ -206,16 +214,21 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
postState state.BeaconState) (int, error) {
// Get attested state
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return 0, errors.Wrap(err, "could not get attested block")
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return 0, errors.Wrap(err, "could not get attested state")
}

update, err := NewLightClientOptimisticUpdateFromBeaconState(
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
ctx,
postState,
signed,
attestedState,
attestedBlock,
)

if err != nil {
@@ -225,7 +238,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
// Return the result
result := &ethpbv2.LightClientOptimisticUpdateWithVersion{
Version: ethpbv2.Version(signed.Version()),
Data: CreateLightClientOptimisticUpdate(update),
Data: update,
}

return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
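Because old and new lines are interleaved in the hunks above, here is a reassembled view of how sendLightClientOptimisticUpdate reads after this change. The reassembly is mine, for readability only, and is not an authoritative copy of the final source:

// Fetch the attested block and state for the parent root, build the update via
// the shared lightclient package, and wrap it for the state feed unchanged
// (the former CreateLightClientOptimisticUpdate conversion step is gone).
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
	return 0, errors.Wrap(err, "could not get attested block")
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
	return 0, errors.Wrap(err, "could not get attested state")
}
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
	ctx, postState, signed, attestedState, attestedBlock,
)
if err != nil {
	return 0, err
}
result := &ethpbv2.LightClientOptimisticUpdateWithVersion{
	Version: ethpbv2.Version(signed.Version()),
	Data:    update,
}
// result is then sent on the state feed, as in the hunk that follows.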
@@ -240,20 +253,21 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return nil
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -269,7 +283,7 @@ func reportProcessingTime(startTime time.Time) {
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
@@ -426,7 +440,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.

// This retrieves missing blocks from the DB (i.e. blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

@@ -436,10 +450,15 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
// The first block can have a bogus root since the block is not inserted in forkchoice
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
root := blk.ParentRoot()
root := roblock.Block().ParentRoot()
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
@@ -448,8 +467,12 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if b.Block().Slot() <= fSlot {
break
}
roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
if err != nil {
return err
}
root = b.Block().ParentRoot()
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)

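For readability, here is a reassembled "after" view of the backfill loop above (the reassembly is mine, not part of the diff): the tip block is wrapped with a zero root because it is never inserted into forkchoice, while each ancestor fetched from the DB is wrapped with its real root.

roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
if err != nil {
	return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
	JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// Walk back through parents until the node is already in forkchoice or the
// block is missing from the DB, stopping at or below the finalized slot.
root := roblock.Block().ParentRoot()
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
	b, err := s.getBlock(ctx, root)
	if err != nil {
		return err
	}
	if b.Block().Slot() <= fSlot {
		break
	}
	roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
	if err != nil {
		return err
	}
	root = b.Block().ParentRoot()
	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{
		Block:               roblock,
		JustifiedCheckpoint: jCheckpoint,
		FinalizedCheckpoint: fCheckpoint,
	})
}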
@@ -145,9 +145,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -190,7 +189,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -246,7 +245,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// Set finalized epoch to 2.
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// There should be 1 node: block 65
@@ -279,7 +278,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
require.NoError(t, err)

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}

@@ -566,7 +565,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -614,7 +615,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -640,7 +643,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {

func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
require.Equal(t, true, IsInvalidBlock(err))
}

@@ -688,7 +693,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1114,7 +1121,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1124,7 +1133,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1134,7 +1145,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1144,7 +1157,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1219,7 +1234,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1237,7 +1254,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

@@ -1256,7 +1275,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1278,7 +1299,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1306,7 +1329,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
@@ -1335,7 +1360,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1397,7 +1424,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1415,7 +1444,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

@@ -1435,7 +1466,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1457,7 +1490,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1485,7 +1520,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
@@ -1513,7 +1550,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1578,7 +1617,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1597,7 +1638,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

@@ -1616,7 +1659,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1643,7 +1688,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1669,7 +1716,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)

// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1708,7 +1757,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1731,7 +1782,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1757,7 +1810,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1813,7 +1868,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1831,7 +1888,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

@@ -1850,7 +1909,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1879,7 +1940,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -1905,7 +1968,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)

// Check that the headroot/state are not in DB and restart the node
@@ -1995,7 +2060,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))

st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2044,7 +2111,11 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
defaultConfig := util.DefaultBlockGenConfig()
defaultConfig.NumWithdrawalRequests = 1
defaultConfig.NumDepositRequests = 2
defaultConfig.NumConsolidationRequests = 1
b, err := util.GenerateFullBlockElectra(st, keys, defaultConfig, 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -2055,11 +2126,13 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
b, err = util.GenerateFullBlockElectra(st, keys, defaultConfig, 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -2067,7 +2140,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
b3, err := util.GenerateFullBlockElectra(st3, keys, defaultConfig, 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
@@ -2205,11 +2278,11 @@ func Test_getFCUArgs(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
signed: wsb,
blockRoot: [32]byte{'a'},
roblock: roblock,
postState: st,
isValidPayload: true,
}
@@ -2219,11 +2292,143 @@ func Test_getFCUArgs(t *testing.T) {
require.ErrorContains(t, "block does not exist", err)

// canonical branch
cfg.headRoot = cfg.blockRoot
cfg.headRoot = cfg.roblock.Root()
fcuArgs = &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
require.NoError(t, err)
require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
}

func TestRollbackBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))

require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)

// Set invalid parent root to trigger forkchoice error.
wsb.SetParentRoot([]byte("bad"))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)

// Rollback block insertion into db and caches.
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))

// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
}

func TestRollbackBlock_ContextDeadline(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))

b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))

require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)

// Set deadlined context when processing the block
cancCtx, canc := context.WithCancel(context.Background())
canc()
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)

parentRoot = roblock.Block().ParentRoot()

cj := &ethpb.Checkpoint{}
cj.Epoch = 1
cj.Root = parentRoot[:]
require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
require.NoError(t, postState.SetFinalizedCheckpoint(cj))

// Rollback block insertion into db and caches.
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))

// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
}

func fakeCommitments(n int) [][]byte {
|
||||
|
||||
@@ -12,10 +12,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// reorgLateBlockCountAttestations is the time until the end of the slot in which we count
|
||||
@@ -190,13 +191,26 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
}
|
||||
|
||||
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregationCount": a.GetAggregationBits().Count(),
|
||||
}).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
var fields logrus.Fields
|
||||
if a.Version() >= version.Electra {
|
||||
fields = logrus.Fields{
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeCount": a.CommitteeBitsVal().Count(),
|
||||
"committeeIndices": a.CommitteeBitsVal().BitIndices(),
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregatedCount": a.GetAggregationBits().Count(),
|
||||
}
|
||||
} else {
|
||||
fields = logrus.Fields{
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregatedCount": a.GetAggregationBits().Count(),
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
}
|
||||
}
|
||||
}
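Since the only difference between the two field sets above is the committee information, the construction reads more clearly as a single helper. A sketch under the assumption that `a` satisfies Prysm's versioned attestation interface; the helper name and the `ethpb.Att` parameter type are assumptions, not part of this diff:

// attestationLogFields is a hypothetical helper collecting the fields logged above.
func attestationLogFields(a ethpb.Att) logrus.Fields {
    fields := logrus.Fields{
        "slot":            a.GetData().Slot,
        "beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
        "targetRoot":      fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
        "aggregatedCount": a.GetAggregationBits().Count(),
    }
    if a.Version() >= version.Electra {
        // Post-Electra attestations carry committee bits instead of a single committee index.
        fields["committeeCount"] = a.CommitteeBitsVal().Count()
        fields["committeeIndices"] = a.CommitteeBitsVal().BitIndices()
    } else {
        fields["committeeIndex"] = a.GetData().CommitteeIndex
    }
    return fields
}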
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -46,7 +47,7 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r32))
|
||||
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, fc, fc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r33))
|
||||
|
||||
@@ -54,10 +55,12 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
a := util.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = []byte{'c'}
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
r33Root := r33.Root()
|
||||
a.Data.BeaconBlockRoot = r33Root[:]
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
|
||||
|
||||
a.Data.Target.Root = r32[:]
|
||||
r32Root := r32.Root()
|
||||
a.Data.Target.Root = r32Root[:]
|
||||
err = service.VerifyLmdFfgConsistency(context.Background(), a)
|
||||
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
@@ -116,7 +119,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -176,7 +181,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -17,16 +17,18 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -83,7 +85,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
|
||||
currentCheckpoints := s.saveCurrentCheckpoints(preState)
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -102,8 +109,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
args := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
signed: blockCopy,
|
||||
blockRoot: blockRoot,
|
||||
roblock: roblock,
|
||||
postState: postState,
|
||||
isValidPayload: isValidPayload,
|
||||
}
|
||||
@@ -184,8 +190,7 @@ func (s *Service) updateCheckpoints(
|
||||
func (s *Service) validateExecutionAndConsensus(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
block consensusblocks.ROBlock,
|
||||
) (state.BeaconState, bool, error) {
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
@@ -204,7 +209,7 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
}
|
||||
@@ -273,7 +278,6 @@ func (s *Service) reportPostBlockProcessing(
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
go func() {
|
||||
finalizedState.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
@@ -350,6 +354,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
|
||||
|
||||
// HasBlock returns true if the block of the input root exists in initial sync blocks cache or DB.
|
||||
func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
if s.BlockBeingSynced(root) {
|
||||
return false
|
||||
}
|
||||
return s.hasBlockInInitSyncOrDB(ctx, root)
|
||||
}
|
||||
|
||||
@@ -553,16 +560,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
}
|
||||
|
||||
// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
|
||||
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
|
||||
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
|
||||
if err != nil {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
|
||||
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
return false, err
|
||||
}
|
||||
if signed.Version() < version.Capella && isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
|
||||
if block.Block().Version() < version.Capella && isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
|
||||
return isValidPayload, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,6 +278,8 @@ func TestService_HasBlock(t *testing.T) {
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, s.HasBlock(context.Background(), r))
|
||||
s.blockBeingSynced.set(r)
|
||||
require.Equal(t, false, s.HasBlock(context.Background(), r))
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
|
||||
@@ -36,13 +36,14 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
@@ -303,7 +304,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint state")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
@@ -329,8 +338,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
saved.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -517,7 +524,11 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
gb, err := consensus_blocks.NewROBlockWithRoot(genesisBlk, genesisBlkRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
|
||||
log.WithError(err).Fatal("Could not process genesis block for fork choice")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
|
||||
|
||||
@@ -376,11 +376,15 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
@@ -453,7 +457,11 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -32,7 +32,7 @@ type mockBeaconNode struct {
|
||||
}
|
||||
|
||||
// StateFeed mocks the same method in the beacon node.
|
||||
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
|
||||
func (mbn *mockBeaconNode) StateFeed() event.SubscriberSender {
|
||||
mbn.mu.Lock()
|
||||
defer mbn.mu.Unlock()
|
||||
if mbn.stateFeed == nil {
|
||||
|
||||
@@ -98,6 +98,44 @@ func (s *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
return s.blockNotifier
|
||||
}
|
||||
|
||||
type EventFeedWrapper struct {
    feed       *event.Feed
    subscribed chan struct{} // this channel is closed once a subscription is made
}

func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
    select {
    case <-w.subscribed:
        break // already closed
    default:
        close(w.subscribed)
    }
    return w.feed.Subscribe(channel)
}

func (w *EventFeedWrapper) Send(value interface{}) int {
    return w.feed.Send(value)
}

// WaitForSubscription allows a test to wait for the feed to have a subscription before beginning to send events.
func (w *EventFeedWrapper) WaitForSubscription(ctx context.Context) error {
    select {
    case <-w.subscribed:
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

var _ event.SubscriberSender = &EventFeedWrapper{}

func NewEventFeedWrapper() *EventFeedWrapper {
    return &EventFeedWrapper{
        feed:       new(event.Feed),
        subscribed: make(chan struct{}),
    }
}
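A minimal usage sketch for the wrapper above, assuming it runs inside a test with `t *testing.T` in scope; it shows why `WaitForSubscription` exists: the producer can block until a consumer has subscribed before sending, so no event is dropped in the window before subscription.

w := NewEventFeedWrapper()

// Consumer: subscribes at some unpredictable point after the test starts.
go func() {
    ch := make(chan int, 1)
    sub := w.Subscribe(ch)
    defer sub.Unsubscribe()
    <-ch
}()

// Producer: wait for the subscription, then send.
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := w.WaitForSubscription(ctx); err != nil {
    t.Fatal(err)
}
w.Send(1) // safe: at least one subscriber exists

Because `subscribed` is closed at most once, additional `Subscribe` calls after the first remain safe.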
|
||||
|
||||
// MockBlockNotifier mocks the block notifier.
|
||||
type MockBlockNotifier struct {
|
||||
feed *event.Feed
|
||||
@@ -131,7 +169,7 @@ func (msn *MockStateNotifier) ReceivedEvents() []*feed.Event {
|
||||
}
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
func (msn *MockStateNotifier) StateFeed() event.SubscriberSender {
|
||||
msn.feedLock.Lock()
|
||||
defer msn.feedLock.Unlock()
|
||||
|
||||
@@ -159,6 +197,23 @@ func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
return msn.feed
|
||||
}
|
||||
|
||||
// NewSimpleStateNotifier makes a state feed without the custom mock feed machinery.
|
||||
func NewSimpleStateNotifier() *MockStateNotifier {
|
||||
return &MockStateNotifier{feed: new(event.Feed)}
|
||||
}
|
||||
|
||||
type SimpleNotifier struct {
|
||||
Feed event.SubscriberSender
|
||||
}
|
||||
|
||||
func (n *SimpleNotifier) StateFeed() event.SubscriberSender {
|
||||
return n.Feed
|
||||
}
|
||||
|
||||
func (n *SimpleNotifier) OperationFeed() event.SubscriberSender {
|
||||
return n.Feed
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (s *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if s.opNotifier == nil {
|
||||
@@ -173,7 +228,7 @@ type MockOperationNotifier struct {
|
||||
}
|
||||
|
||||
// OperationFeed returns an operation feed.
|
||||
func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
func (mon *MockOperationNotifier) OperationFeed() event.SubscriberSender {
|
||||
if mon.feed == nil {
|
||||
mon.feed = new(event.Feed)
|
||||
}
|
||||
@@ -512,7 +567,7 @@ func prepareForkchoiceState(
|
||||
payloadHash [32]byte,
|
||||
justified *ethpb.Checkpoint,
|
||||
finalized *ethpb.Checkpoint,
|
||||
) (state.BeaconState, [32]byte, error) {
|
||||
) (state.BeaconState, blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
@@ -533,7 +588,26 @@ func prepareForkchoiceState(
|
||||
|
||||
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
return st, blockRoot, err
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
BlockHash: payloadHash[:],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
|
||||
|
||||
// CachedHeadRoot mocks the same method in the chain service
|
||||
@@ -576,9 +650,9 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
|
||||
}
|
||||
|
||||
// InsertNode mocks the same method in the chain service
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, block blocks.ROBlock) error {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, block)
|
||||
}
|
||||
return nil
|
||||
}
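The signature change above runs through the whole diff: fork choice insertion now takes a `blocks.ROBlock`, which carries its root alongside the signed block, instead of a bare 32-byte root. A minimal sketch of the call pattern, mirroring the test changes elsewhere in this diff; `b`, `ctx`, `postState`, and `forkChoiceStore` are illustrative names, and the `consensusblocks` alias is the one used in the surrounding files.

wsb, err := consensusblocks.NewSignedBeaconBlock(b) // b: a protobuf signed block, e.g. *ethpb.SignedBeaconBlock
if err != nil {
    return err
}
root, err := b.Block.HashTreeRoot()
if err != nil {
    return err
}
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
if err != nil {
    return err
}
// The root now travels with the block, so fork choice reads roblock.Root()
// instead of taking a separate [32]byte argument.
if err := forkChoiceStore.InsertNode(ctx, postState, roblock); err != nil {
    return errors.Wrap(err, "could not insert block to fork choice store")
}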
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -26,7 +27,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -14,10 +14,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ErrNoBuilder is used when builder endpoint is not configured.
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (vendored, 2 changes)
@@ -46,6 +46,7 @@ go_library(
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
@@ -56,7 +57,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
beacon-chain/cache/common.go (vendored, 2 changes)
@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(_ interface{}) error {
|
||||
func popProcessNoopFunc(_ interface{}, _ bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -26,7 +27,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -10,10 +10,10 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/wealdtech/go-bytesutil"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -241,7 +241,7 @@ func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, bloc
|
||||
c.pendingDeposits = append(c.pendingDeposits,
|
||||
ðpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(c.pendingDeposits))))
|
||||
span.SetAttributes(trace.Int64Attribute("count", int64(len(c.pendingDeposits))))
|
||||
}
|
||||
|
||||
// Deposits returns the cached internal deposit tree.
|
||||
@@ -304,7 +304,7 @@ func (c *Cache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*eth
|
||||
return depositCntrs[i].Index < depositCntrs[j].Index
|
||||
})
|
||||
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
|
||||
span.SetAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
|
||||
|
||||
return depositCntrs
|
||||
}
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
beacon-chain/cache/registration.go (vendored, 2 changes)
@@ -7,8 +7,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// RegistrationCache is used to store the cached results of an Validator Registration request.
|
||||
|
||||
beacon-chain/cache/skip_slot_cache.go (vendored, 8 changes)
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
|
||||
"go.opencensus.io/trace"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -92,17 +92,17 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
|
||||
delay *= delayFactor
|
||||
delay = math.Min(delay, maxDelay)
|
||||
}
|
||||
span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))
|
||||
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))
|
||||
|
||||
item, exists := c.cache.Get(r)
|
||||
|
||||
if exists && item != nil {
|
||||
skipSlotCacheHit.Inc()
|
||||
span.AddAttributes(trace.BoolAttribute("hit", true))
|
||||
span.SetAttributes(trace.BoolAttribute("hit", true))
|
||||
return item.(state.BeaconState).Copy(), nil
|
||||
}
|
||||
skipSlotCacheMiss.Inc()
|
||||
span.AddAttributes(trace.BoolAttribute("hit", false))
|
||||
span.SetAttributes(trace.BoolAttribute("hit", false))
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
beacon-chain/cache/tracked_validators.go (vendored, 72 changes)
@@ -2,19 +2,28 @@ package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
var validatorInactivityThreshold = 5 // 5 epochs
|
||||
|
||||
type TrackedValidator struct {
|
||||
Active bool
|
||||
FeeRecipient primitives.ExecutionAddress
|
||||
Index primitives.ValidatorIndex
|
||||
LastUpdated time.Time
|
||||
}
|
||||
|
||||
type TrackedValidatorsCache struct {
|
||||
sync.Mutex
|
||||
trackedValidators map[primitives.ValidatorIndex]TrackedValidator
|
||||
sync.RWMutex
|
||||
trackedValidators map[primitives.ValidatorIndex]TrackedValidator
|
||||
trackedValidatorsNum uint64
|
||||
trackedValidatorsNumLastUpdated uint64
|
||||
trackedValidatorsMetric prometheus.GaugeFunc
|
||||
}
|
||||
|
||||
func NewTrackedValidatorsCache() *TrackedValidatorsCache {
|
||||
@@ -24,8 +33,8 @@ func NewTrackedValidatorsCache() *TrackedValidatorsCache {
|
||||
}
|
||||
|
||||
func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.RLock()
|
||||
defer t.RUnlock()
|
||||
val, ok := t.trackedValidators[index]
|
||||
return val, ok
|
||||
}
|
||||
@@ -34,6 +43,32 @@ func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
    t.Lock()
    defer t.Unlock()
    t.trackedValidators[val.Index] = val
    t.updateTrackedValidatorsNum()
}

func (t *TrackedValidatorsCache) updateTrackedValidatorsNum() {
    epochTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
    if t.trackedValidatorsNumLastUpdated != 0 && t.trackedValidatorsNumLastUpdated+epochTime > uint64(time.Now().Unix()) {
        // too early to update again (less than an epoch since the last refresh)
        return
    }
    num := 0
    for _, value := range t.trackedValidators {
        if value.LastUpdated.Unix()+int64(validatorInactivityThreshold)*int64(epochTime) < time.Now().Unix() {
            // validator expired
            // TODO: should we prune?
            continue
        }
        num++
    }
    t.trackedValidatorsNum = uint64(num)
    t.trackedValidatorsNumLastUpdated = uint64(time.Now().Unix())
}
|
||||
|
||||
func (t *TrackedValidatorsCache) GetTrackedValidatorsNum() uint64 {
|
||||
t.RLock()
|
||||
defer t.RUnlock()
|
||||
return t.trackedValidatorsNum
|
||||
}
|
||||
|
||||
func (t *TrackedValidatorsCache) Prune() {
|
||||
@@ -43,7 +78,32 @@ func (t *TrackedValidatorsCache) Prune() {
|
||||
}
|
||||
|
||||
func (t *TrackedValidatorsCache) Validating() bool {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.RLock()
|
||||
defer t.RUnlock()
|
||||
return len(t.trackedValidators) > 0
|
||||
}
|
||||
|
||||
func (t *TrackedValidatorsCache) Size() int {
|
||||
t.RLock()
|
||||
defer t.RUnlock()
|
||||
return len(t.trackedValidators)
|
||||
}
|
||||
|
||||
func (t *TrackedValidatorsCache) RegisterTrackedValidatorMetric() error {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.trackedValidatorsMetric = prometheus.NewGaugeFunc(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "tracked_validator_count",
|
||||
Help: "The total number of validators tracked by trackedValidatorsCache in the beacon node. This is updated at intervals via the push proposer settings API endpoint.",
|
||||
},
|
||||
func() float64 { return float64(t.GetTrackedValidatorsNum()) },
|
||||
)
|
||||
return prometheus.Register(t.trackedValidatorsMetric)
|
||||
}
|
||||
|
||||
func (t *TrackedValidatorsCache) UnregisterTrackedValidatorMetric() {
|
||||
t.RLock()
|
||||
defer t.RUnlock()
|
||||
prometheus.Unregister(t.trackedValidatorsMetric)
|
||||
}
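A short usage sketch for the cache and gauge added above; with mainnet parameters (32 slots of 12 s) epochTime works out to 384 s, so the 5-epoch inactivity threshold is 1920 s, roughly 32 minutes. Variable names and the surrounding error handling are illustrative, not part of this diff.

tvc := cache.NewTrackedValidatorsCache()
if err := tvc.RegisterTrackedValidatorMetric(); err != nil {
    // A duplicate registration surfaces here (prometheus.AlreadyRegisteredError).
    return err
}
defer tvc.UnregisterTrackedValidatorMetric()

// Typically driven by the proposer-settings push from connected validators.
tvc.Set(cache.TrackedValidator{
    Active:       true,
    Index:        primitives.ValidatorIndex(42),
    FeeRecipient: primitives.ExecutionAddress{},
    LastUpdated:  time.Now(),
})

_ = tvc.Validating()              // true once at least one validator is tracked
_ = tvc.Size()                    // raw map size, stale entries included
_ = tvc.GetTrackedValidatorsNum() // gauge value: entries updated within the inactivity threshold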
|
||||
|
||||
@@ -34,13 +34,13 @@ go_library(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -14,9 +14,9 @@ import (
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
|
||||
|
||||
@@ -37,7 +37,7 @@ func ProcessDeposits(
|
||||
beaconState state.BeaconState,
|
||||
deposits []*ethpb.Deposit,
|
||||
) (state.BeaconState, error) {
|
||||
batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
|
||||
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -46,7 +46,7 @@ func ProcessDeposits(
|
||||
if deposit == nil || deposit.Data == nil {
|
||||
return nil, errors.New("got a nil deposit in block")
|
||||
}
|
||||
beaconState, err = ProcessDeposit(beaconState, deposit, batchVerified)
|
||||
beaconState, err = ProcessDeposit(beaconState, deposit, allSignaturesVerified)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
|
||||
}
|
||||
@@ -81,7 +81,7 @@ func ProcessDeposits(
|
||||
// amount=deposit.data.amount,
|
||||
// signature=deposit.data.signature,
|
||||
// )
|
||||
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
|
||||
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
|
||||
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
|
||||
if deposit == nil || deposit.Data == nil {
|
||||
return nil, err
|
||||
@@ -92,7 +92,7 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ApplyDeposit(beaconState, deposit.Data, verifySignature)
|
||||
return ApplyDeposit(beaconState, deposit.Data, allSignaturesVerified)
|
||||
}
|
||||
|
||||
// ApplyDeposit
|
||||
@@ -115,13 +115,13 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
|
||||
// # Increase balance by deposit amount
|
||||
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
|
||||
// increase_balance(state, index, amount)
|
||||
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
|
||||
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSignaturesVerified bool) (state.BeaconState, error) {
|
||||
pubKey := data.PublicKey
|
||||
amount := data.Amount
|
||||
withdrawalCredentials := data.WithdrawalCredentials
|
||||
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
|
||||
if !ok {
|
||||
if verifySignature {
|
||||
if !allSignaturesVerified {
|
||||
valid, err := blocks.IsValidDepositSignature(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -187,11 +187,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
|
||||
// return Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=withdrawal_credentials,
|
||||
// effective_balance=effective_balance,
|
||||
// slashed=False,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=effective_balance,
|
||||
// )
|
||||
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
|
||||
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
|
||||
@@ -202,10 +203,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubKey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
EffectiveBalance: effectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: effectiveBalance,
|
||||
}
|
||||
}
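A small worked example of the effective-balance rounding above, using the mainnet increment of 1,000,000,000 Gwei (1 ETH); the deposit amount is illustrative, and note that the spec additionally caps the result at MAX_EFFECTIVE_BALANCE, which is not visible in the lines shown here.

// Illustrative values only.
amount := uint64(32_700_000_000)   // 32.7 ETH deposited, in Gwei
increment := uint64(1_000_000_000) // EffectiveBalanceIncrement on mainnet
effectiveBalance := amount - (amount % increment)
// amount % increment == 700_000_000, so effectiveBalance == 32_000_000_000:
// the balance is rounded down to a whole number of increments.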
|
||||
|
||||
@@ -199,7 +199,7 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposit(beaconState, dep[0], true)
|
||||
newState, err := altair.ProcessDeposit(beaconState, dep[0], false)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"go.opencensus.io/trace"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
)
|
||||
|
||||
// AttDelta contains rewards and penalties for a single attestation.
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"go.opencensus.io/trace"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
)
|
||||
|
||||
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
|
||||
|
||||
@@ -40,6 +40,7 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -49,7 +50,6 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -14,10 +14,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
|
||||
@@ -110,25 +110,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
|
||||
var indexedAtt ethpb.IndexedAtt
|
||||
|
||||
if att.Version() < version.Electra {
|
||||
if uint64(att.GetData().CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
|
||||
}
|
||||
|
||||
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if att.Version() >= version.Electra {
|
||||
if att.GetData().CommitteeIndex != 0 {
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
|
||||
@@ -154,6 +136,29 @@ func VerifyAttestationNoVerifySignature(
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if uint64(att.GetData().CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if committee == nil {
|
||||
return errors.New("no committee exist for this attestation")
|
||||
}
|
||||
|
||||
if err := helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
|
||||
return errors.Wrap(err, "failed to verify aggregation bitfield")
|
||||
}
|
||||
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
|
||||
|
||||
@@ -55,12 +55,26 @@ func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposi
|
||||
return false, err
|
||||
}
|
||||
|
||||
verified := false
|
||||
if err := verifyDepositDataWithDomain(ctx, deposits, domain); err != nil {
|
||||
log.WithError(err).Debug("Failed to batch verify deposits signatures, will try individual verify")
|
||||
verified = true
|
||||
return false, nil
|
||||
}
|
||||
return verified, nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// BatchVerifyPendingDepositsSignatures batch verifies pending deposit signatures.
|
||||
func BatchVerifyPendingDepositsSignatures(ctx context.Context, deposits []*ethpb.PendingDeposit) (bool, error) {
|
||||
var err error
|
||||
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := verifyPendingDepositDataWithDomain(ctx, deposits, domain); err != nil {
|
||||
log.WithError(err).Debug("Failed to batch verify deposits signatures, will try individual verify")
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
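With this change the return contract is: (true, nil) when the whole batch verifies, (false, nil) when batch verification fails and callers should fall back to per-deposit signature checks, and a non-nil error only when the signing domain cannot be computed. A condensed sketch of the caller side, mirroring the ProcessDeposits change earlier in this diff (error wrapping abbreviated):

allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
    return nil, err
}
for _, deposit := range deposits {
    // When allSignaturesVerified is false, ApplyDeposit re-checks each deposit
    // individually via IsValidDepositSignature before adding the validator.
    beaconState, err = ProcessDeposit(beaconState, deposit, allSignaturesVerified)
    if err != nil {
        return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
    }
}
return beaconState, nil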
|
||||
|
||||
// IsValidDepositSignature returns whether deposit_data is valid
|
||||
@@ -159,3 +173,44 @@ func verifyDepositDataWithDomain(ctx context.Context, deps []*ethpb.Deposit, dom
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyPendingDepositDataWithDomain(ctx context.Context, deps []*ethpb.PendingDeposit, domain []byte) error {
|
||||
if len(deps) == 0 {
|
||||
return nil
|
||||
}
|
||||
pks := make([]bls.PublicKey, len(deps))
|
||||
sigs := make([][]byte, len(deps))
|
||||
msgs := make([][32]byte, len(deps))
|
||||
for i, dep := range deps {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if dep == nil {
|
||||
return errors.New("nil deposit")
|
||||
}
|
||||
dpk, err := bls.PublicKeyFromBytes(dep.PublicKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pks[i] = dpk
|
||||
sigs[i] = dep.Signature
|
||||
depositMessage := ðpb.DepositMessage{
|
||||
PublicKey: dep.PublicKey,
|
||||
WithdrawalCredentials: dep.WithdrawalCredentials,
|
||||
Amount: dep.Amount,
|
||||
}
|
||||
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgs[i] = sr
|
||||
}
|
||||
verify, err := bls.VerifyMultipleSignatures(sigs, msgs, pks)
|
||||
if err != nil {
|
||||
return errors.Errorf("could not verify multiple signatures: %v", err)
|
||||
}
|
||||
if !verify {
|
||||
return errors.New("one or more deposit signatures did not verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -17,6 +17,41 @@ import (
|
||||
)
|
||||
|
||||
func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
|
||||
require.NoError(t, err)
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Amount: 3000,
|
||||
},
|
||||
}
|
||||
sr, err := signing.ComputeSigningRoot(ðpb.DepositMessage{
|
||||
PublicKey: deposit.Data.PublicKey,
|
||||
WithdrawalCredentials: deposit.Data.WithdrawalCredentials,
|
||||
Amount: 3000,
|
||||
}, domain)
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
deposit.Data.Signature = sig.Marshal()
|
||||
leaf, err := deposit.Data.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
|
||||
deposit.Proof = proof
|
||||
require.NoError(t, err)
|
||||
verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified)
|
||||
}
|
||||
|
||||
func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
|
||||
@@ -34,9 +69,9 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
|
||||
|
||||
deposit.Proof = proof
|
||||
require.NoError(t, err)
|
||||
ok, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
|
||||
verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, false, verified)
|
||||
}
|
||||
|
||||
func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
|
||||
@@ -93,3 +128,54 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, valid)
|
||||
}
|
||||
|
||||
func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
|
||||
require.NoError(t, err)
|
||||
pendingDeposit := ðpb.PendingDeposit{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Amount: 3000,
|
||||
}
|
||||
sr, err := signing.ComputeSigningRoot(ðpb.DepositMessage{
|
||||
PublicKey: pendingDeposit.PublicKey,
|
||||
WithdrawalCredentials: pendingDeposit.WithdrawalCredentials,
|
||||
Amount: 3000,
|
||||
}, domain)
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
pendingDeposit.Signature = sig.Marshal()
|
||||
|
||||
sk2, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pendingDeposit2 := ðpb.PendingDeposit{
|
||||
PublicKey: sk2.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Amount: 4000,
|
||||
}
|
||||
sr2, err := signing.ComputeSigningRoot(ðpb.DepositMessage{
|
||||
PublicKey: pendingDeposit2.PublicKey,
|
||||
WithdrawalCredentials: pendingDeposit2.WithdrawalCredentials,
|
||||
Amount: 4000,
|
||||
}, domain)
|
||||
require.NoError(t, err)
|
||||
sig2 := sk2.Sign(sr2[:])
|
||||
pendingDeposit2.Signature = sig2.Marshal()
|
||||
|
||||
verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified)
|
||||
}
|
||||
|
||||
func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
|
||||
pendingDeposit := ðpb.PendingDeposit{
|
||||
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, verified)
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
package blocks
|
||||
|
||||
var ProcessBLSToExecutionChange = processBLSToExecutionChange
|
||||
|
||||
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount
|
||||
|
||||
@@ -213,6 +213,11 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
},
|
||||
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{
|
||||
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
|
||||
Deposits: make([]*enginev1.DepositRequest, 0),
|
||||
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
|
||||
@@ -2,11 +2,13 @@ package blocks
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -59,6 +61,9 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
|
||||
if body == nil {
|
||||
return false, errors.New("nil block body")
|
||||
}
|
||||
if body.Version() >= version.Capella {
|
||||
return true, nil
|
||||
}
|
||||
payload, err := body.Execution()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
@@ -200,90 +205,40 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
var err error
|
||||
if st.Version() >= version.Capella {
|
||||
st, err = ProcessWithdrawals(st, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
}
|
||||
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
payload, err := body.Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := verifyBlobCommitmentCount(body); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := ValidatePayload(st, payload); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !complete {
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
if body.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
// Validate current header's parent hash matches state header's block hash.
|
||||
h, err := st.LatestExecutionPayloadHeader()
|
||||
kzgs, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.ParentHash(), h.BlockHash()) {
|
||||
return ErrInvalidPayloadBlockHash
|
||||
if len(kzgs) > field_params.MaxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
|
||||
}
|
||||
return nil
|
||||
}
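After this refactor, callers pass the block body and ProcessPayload returns only an error, pulling the execution payload and the blob-commitment count check internally. A minimal sketch of the new call shape, consistent with the test update later in this diff (the surrounding state-transition code is elided):

// signed is an interfaces.ReadOnlySignedBeaconBlock; st is the beacon state being advanced.
body := signed.Block().Body()
if err := blocks.ProcessPayload(st, body); err != nil {
    return errors.Wrap(err, "could not process execution payload")
}
// No state reassignment is needed anymore: the previous signature returned
// (state.BeaconState, error), the new one returns only error.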
|
||||
|
||||
// ValidatePayloadHeader validates the payload header.
|
||||
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Validate header's random mix matches with state in current epoch
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
|
||||
// Validate header's timestamp matches with state in current slot.
|
||||
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPayloadHeader processes the payload header.
|
||||
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
var err error
|
||||
if st.Version() >= version.Capella {
|
||||
st, err = ProcessWithdrawals(st, header)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
}
|
||||
}
|
||||
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := ValidatePayloadHeader(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// GetBlockPayloadHash returns the hash of the execution payload of the block
|
||||
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
|
||||
var payloadHash [32]byte
|
||||
|
||||
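The hunk above changes ProcessPayload from taking a wrapped interfaces.ExecutionData and returning an updated state to taking the whole interfaces.ReadOnlyBeaconBlockBody and returning only an error; the body now supplies the payload itself via Execution(), and Deneb-and-later bodies have their blob KZG commitment count checked against field_params.MaxBlobsPerBlock. A minimal caller-side sketch of the new shape, assuming only the signatures visible in this diff (the wrapper name applyExecutionPayload is hypothetical and not part of the change):

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

func applyExecutionPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
	// ProcessPayload now pulls the payload out of the body, runs the
	// blob-commitment count check for Deneb+ bodies, validates the payload
	// against the state, and stores the latest execution payload header,
	// so the caller no longer wraps the payload or re-assigns the state.
	return blocks.ProcessPayload(st, body)
}
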
@@ -1,6 +1,7 @@
package blocks_test

import (
	"fmt"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -13,6 +14,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -251,7 +253,8 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
	require.NoError(t, err)
	got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
	require.NoError(t, err)
	require.Equal(t, false, got)
	// #14614
	require.Equal(t, true, got)
}

func Test_IsExecutionEnabled(t *testing.T) {
@@ -581,14 +584,17 @@ func Test_ProcessPayload(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			wrappedPayload, err := consensusblocks.WrappedExecutionPayload(tt.payload)
			body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyBellatrix{
				ExecutionPayload: tt.payload,
			})
			require.NoError(t, err)
			st, err := blocks.ProcessPayload(st, wrappedPayload)
			if err != nil {
			if err := blocks.ProcessPayload(st, body); err != nil {
				require.Equal(t, tt.err.Error(), err.Error())
			} else {
				require.Equal(t, tt.err, err)
				want, err := consensusblocks.PayloadToHeader(wrappedPayload)
				payload, err := body.Execution()
				require.NoError(t, err)
				want, err := consensusblocks.PayloadToHeader(payload)
				require.Equal(t, tt.err, err)
				h, err := st.LatestExecutionPayloadHeader()
				require.NoError(t, err)
@@ -609,13 +615,14 @@ func Test_ProcessPayloadCapella(t *testing.T) {
	random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
	require.NoError(t, err)
	payload.PrevRandao = random
	wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
	require.NoError(t, err)
	_, err = blocks.ProcessPayload(st, wrapped)
	body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyCapella{
		ExecutionPayload: payload,
	})
	require.NoError(t, err)
	require.NoError(t, blocks.ProcessPayload(st, body))
}

func Test_ProcessPayloadHeader(t *testing.T) {
func Test_ProcessPayload_Blinded(t *testing.T) {
	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
	random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
	require.NoError(t, err)
@@ -663,8 +670,13 @@ func Test_ProcessPayloadHeader(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			st, err := blocks.ProcessPayloadHeader(st, tt.header)
			if err != nil {
			p, ok := tt.header.Proto().(*enginev1.ExecutionPayloadHeader)
			require.Equal(t, true, ok)
			body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BlindedBeaconBlockBodyBellatrix{
				ExecutionPayloadHeader: p,
			})
			require.NoError(t, err)
			if err := blocks.ProcessPayload(st, body); err != nil {
				require.Equal(t, tt.err.Error(), err.Error())
			} else {
				require.Equal(t, tt.err, err)
@@ -728,7 +740,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err = blocks.ValidatePayloadHeader(st, tt.header)
			err = blocks.ValidatePayload(st, tt.header)
			require.Equal(t, tt.err, err)
		})
	}
@@ -785,7 +797,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
			err = blocks.ValidatePayloadWhenMergeCompletes(tt.state, tt.header)
			require.Equal(t, tt.err, err)
		})
	}
@@ -906,3 +918,15 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella {
		Withdrawals: make([]*enginev1.Withdrawal, 0),
	}
}

func TestVerifyBlobCommitmentCount(t *testing.T) {
	b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
	rb, err := consensusblocks.NewBeaconBlock(b)
	require.NoError(t, err)
	require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))

	b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
	rb, err = consensusblocks.NewBeaconBlock(b)
	require.NoError(t, err)
	require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
}

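The new TestVerifyBlobCommitmentCount above covers the empty and over-limit cases. A small companion sketch, not part of this diff, for the exact boundary using the same helpers visible here (consensusblocks.NewBeaconBlock, blocks.VerifyBlobCommitmentCount, fieldparams.MaxBlobsPerBlock); the test name is hypothetical:

func TestVerifyBlobCommitmentCount_AtLimit(t *testing.T) {
	// Exactly MaxBlobsPerBlock commitments: the check rejects only counts
	// strictly above the limit, so this should not return an error.
	b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{
		BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock),
	}}
	rb, err := consensusblocks.NewBeaconBlock(b)
	require.NoError(t, err)
	require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))
}
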
@@ -120,35 +120,36 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
//     expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state)  # [Modified in Electra:EIP7251]
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
//
//     assert len(payload.withdrawals) == len(expected_withdrawals)
//     expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state)  # [Modified in Electra:EIP7251]
//
//     for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
//         assert withdrawal == expected_withdrawal
//         decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//     assert len(payload.withdrawals) == len(expected_withdrawals)
//
//     # Update pending partial withdrawals [New in Electra:EIP7251]
//     state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
//     for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
//         assert withdrawal == expected_withdrawal
//         decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
//     # Update the next withdrawal index if this block contained withdrawals
//     if len(expected_withdrawals) != 0:
//         latest_withdrawal = expected_withdrawals[-1]
//         state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//     # Update pending partial withdrawals [New in Electra:EIP7251]
//     state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
//
//     # Update the next validator index to start the next withdrawal sweep
//     if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
//         # Next sweep starts after the latest withdrawal's validator index
//         next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
//         state.next_withdrawal_validator_index = next_validator_index
//     else:
//         # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
//         next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
//         next_validator_index = ValidatorIndex(next_index % len(state.validators))
//         state.next_withdrawal_validator_index = next_validator_index
//     # Update the next withdrawal index if this block contained withdrawals
//     if len(expected_withdrawals) != 0:
//         latest_withdrawal = expected_withdrawals[-1]
//         state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
//     # Update the next validator index to start the next withdrawal sweep
//     if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
//         # Next sweep starts after the latest withdrawal's validator index
//         next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
//         state.next_withdrawal_validator_index = next_validator_index
//     else:
//         # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
//         next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
//         next_validator_index = ValidatorIndex(next_index % len(state.validators))
//         state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
	expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
	expectedWithdrawals, processedPartialWithdrawalsCount, err := st.ExpectedWithdrawals()
	if err != nil {
		return nil, errors.Wrap(err, "could not get expected withdrawals")
	}
@@ -192,7 +193,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
	}

	if st.Version() >= version.Electra {
		if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
		if err := st.DequeuePendingPartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
			return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
		}
	}

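The spec pseudocode reproduced in the comment above drives both renames in this hunk (processed_partial_withdrawals_count and DequeuePendingPartialWithdrawals). As a standalone illustration of the sweep-cursor arithmetic the pseudocode describes, here is a hedged sketch over plain uint64 values; the function name and parameters are hypothetical and not part of the Prysm API:

// nextWithdrawalSweepStart mirrors the tail of process_withdrawals: after a
// full payload the sweep resumes just past the last withdrawn validator,
// otherwise the cursor advances by the maximum sweep length, wrapping around
// the validator set in both cases.
func nextWithdrawalSweepStart(lastWithdrawnValidator, currentCursor, withdrawalCount, numValidators, maxWithdrawalsPerPayload, maxSweep uint64) uint64 {
	if withdrawalCount == maxWithdrawalsPerPayload {
		return (lastWithdrawnValidator + 1) % numValidators
	}
	return (currentCursor + maxSweep) % numValidators
}
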
@@ -32,15 +32,17 @@ go_library(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//contracts/deposit:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -52,24 +54,27 @@ go_test(
        "deposit_fuzz_test.go",
        "deposits_test.go",
        "effective_balance_updates_test.go",
        "export_test.go",
        "registry_updates_test.go",
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
        "withdrawals_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/testing:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

Some files were not shown because too many files have changed in this diff.