Compare commits


1 Commit

Author SHA1 Message Date
nisdas 2b52df5904 Add it in 2024-06-13 19:45:30 +08:00
1553 changed files with 90455 additions and 135325 deletions

View File

@@ -22,7 +22,6 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true

View File

@@ -1 +1 @@
7.4.1
7.1.0

View File

@@ -10,7 +10,6 @@
in review.
4. Note that PRs updating dependencies and new Go versions are not accepted.
Please file an issue instead.
5. A changelog entry is required for user facing issues.
-->
**What type of PR is this?**
@@ -29,9 +28,3 @@
Fixes #
**Other notes for review**
**Acknowledgements**
- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.

View File

@@ -1,4 +1,4 @@
FROM golang:1.23-alpine
FROM golang:1.21-alpine
COPY entrypoint.sh /entrypoint.sh

View File

@@ -1,34 +0,0 @@
# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
name: changelog
on:
pull_request:
branches: [ "develop" ]
jobs:
run-changelog-check:
runs-on: ubuntu-latest
steps:
- name: Checkout source code
uses: actions/checkout@v3
- name: Download unclog binary
uses: dsaltares/fetch-gh-release-asset@aa2ab1243d6e0d5b405b973c89fa4d06a2d0fff7 # 1.1.2
with:
repo: OffchainLabs/unclog
version: "tags/v0.1.3"
file: "unclog"
- name: Get new changelog files
id: new-changelog-files
uses: tj-actions/changed-files@v45
with:
files: |
changelog/**.md
- name: Run lint command
env:
ALL_ADDED_MARKDOWN: ${{ steps.new-changelog-files.outputs.added_files }}
run: chmod +x unclog && ./unclog check -fragment-env=ALL_ADDED_MARKDOWN

View File

@@ -1,21 +0,0 @@
name: Protobuf Format
on:
push:
branches: [ '*' ]
pull_request:
branches: [ '*' ]
merge_group:
types: [checks_requested]
jobs:
clang-format-checking:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Is this step failing for you?
# Run: clang-format -i proto/**/*.proto
# See: https://clang.llvm.org/docs/ClangFormat.html
- uses: RafikFarhad/clang-format-github-action@v3
with:
sources: "proto/**/*.proto"

View File

@@ -16,7 +16,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.23.5'
go-version: '1.21.5'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
@@ -36,7 +36,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.23.5'
go-version: '1.21.5'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}

View File

@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Go mod tidy checker
id: gomodtidy
@@ -27,15 +27,15 @@ jobs:
GO111MODULE: on
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Go 1.23
uses: actions/setup-go@v4
uses: actions/checkout@v2
- name: Set up Go 1.21
uses: actions/setup-go@v3
with:
go-version: '1.23.5'
go-version: '1.21.5'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.19.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -43,18 +43,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up Go 1.23
uses: actions/setup-go@v4
- name: Set up Go 1.21
uses: actions/setup-go@v3
with:
go-version: '1.23.5'
go-version: '1.21.5'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
uses: golangci/golangci-lint-action@v3
with:
version: v1.63.4
version: v1.55.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,13 +62,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v4
uses: actions/setup-go@v2
with:
go-version: '1.23.5'
go-version: '1.21.5'
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Get dependencies
run: |

View File

@@ -1,20 +1,28 @@
run:
timeout: 10m
go: '1.23.5'
issues:
exclude-files:
skip-files:
- validator/web/site_data.go
- .*_test.go
exclude-dirs:
skip-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.21.5'
linters:
enable-all: true
disable:
# Deprecated linters:
- deadcode
- exhaustivestruct
- golint
- govet
- ifshort
- interfacer
- maligned
- nosnakecase
- scopelint
- structcheck
- varcheck
# Disabled for now:
- asasalint
@@ -26,8 +34,7 @@ linters:
- dogsled
- dupl
- durationcheck
- errname
- err113
- errorlint
- exhaustive
- exhaustruct
- forbidigo
@@ -41,17 +48,16 @@ linters:
- gocyclo
- godot
- godox
- goerr113
- gofumpt
- gomnd
- gomoddirectives
- gosec
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mnd
- musttag
- nakedret
- nestif
@@ -66,9 +72,7 @@ linters:
- predeclared
- promlinter
- protogetter
- recvcheck
- revive
- spancheck
- staticcheck
- stylecheck
- tagalign

View File

@@ -26,6 +26,7 @@ approval_rules:
only_changed_files:
paths:
- "*pb.go"
- "*pb.gw.go"
- "*.bazel"
options:
ignore_commits_by:
@@ -68,6 +69,7 @@ approval_rules:
changed_files:
ignore:
- "*pb.go"
- "*pb.gw.go"
- "*.bazel"
options:
ignore_commits_by:

View File

@@ -55,6 +55,13 @@ alias(
visibility = ["//visibility:public"],
)
# Protobuf gRPC gateway compiler
alias(
name = "grpc_gateway_proto_compiler",
actual = "@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
visibility = ["//visibility:public"],
)
gometalinter(
name = "gometalinter",
config = "//:.gometalinter.json",
@@ -217,7 +224,6 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/defers:go_default_library",
"@org_golang_x_tools//go/analysis/passes/directive:go_default_library",
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
# fieldalignment disabled
#"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",

File diff suppressed because it is too large.

View File

@@ -6,9 +6,6 @@ Excited by our work and want to get involved in building out our sharding releas
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ); drop us a line there if you want to get more involved or have any questions on our implementation!
> [!IMPORTANT]
> Please, **do not send pull requests for trivial changes**, such as typos; these will be rejected. These types of pull requests incur a cost to reviewers and do not provide much value to the project. If you are unsure, please open an issue first to discuss the change.
## Contribution Steps
**1. Set up Prysm following the instructions in README.md.**
@@ -123,19 +120,15 @@ $ git push myrepo feature-in-progress-branch
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
**16. Add an entry to CHANGELOG.md.**
**16. Create a pull request.**
All PRs must include a changelog fragment file in the `changelog` directory. If your change is not user-facing or should not be mentioned in the changelog for some other reason, you may use the `Ignored` changelog section in your fragment's header to satisfy this requirement without altering the final release changelog. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls.
**17. Create a pull request.**
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base develop”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
**18. Respond to comments by Core Contributors.**
**17. Respond to comments by Core Contributors.**
Core Contributors may ask questions and request that you make edits. If you set notifications at the top of the page to “not watching,” you will still be notified by email whenever someone comments on the page of a pull request you have created. If you are asked to modify your pull request, repeat steps 8 through 15, then leave a comment to notify the Core Contributors that the pull request is ready for further review.
**19. If the number of commits becomes excessive, you may be asked to squash your commits.**
**18. If the number of commits becomes excessive, you may be asked to squash your commits.**
You can do this with an interactive rebase. Start by running the following command to determine the commit that is the base of your branch...
@@ -143,7 +136,7 @@ Core Contributors may ask questions and request that you make edits. If you set
$ git merge-base feature-in-progress-branch prysm/master
```
**20. The previous command will return a commit-hash that you should use in the following command.**
**19. The previous command will return a commit-hash that you should use in the following command.**
```
$ git rebase -i commit-hash
@@ -167,24 +160,13 @@ squash hash add a feature
Save and close the file, then a commit command will appear in the terminal that squashes the smaller commits into one. Check to be sure the commit message accurately reflects your changes and then hit enter to execute it.
**21. Update your pull request with the following command.**
**20. Update your pull request with the following command.**
```
$ git push myrepo feature-in-progress-branch -f
```
**22. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
## Maintaining CHANGELOG.md
This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/). In order to minimize conflicts and workflow headaches, we chose to implement a changelog management
strategy that uses changelog "fragment" files, managed by our changelog management tool called `unclog`. Each PR must include a new changelog fragment file in the `changelog` directory, as specified by unclog's
[README.md](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#what-is-a-changelog-fragment). As the `unclog` README suggests in the [Best Practices](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#best-practices) section,
the standard naming convention for your PR's fragment file, to avoid conflicting with another fragment file, is `changelog/<github user name>_<PR branch name>.md`.
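As a hedged illustration only (the user name, branch name, and section heading below are hypothetical; see the unclog README linked above for the exact fragment format), creating a fragment for a non-user-facing change could look like this:
```
# Hypothetical fragment for GitHub user "jdoe" working on branch "my-feature".
# The "Ignored" section marks a change that should not appear in the release changelog.
cat > changelog/jdoe_my-feature.md <<'EOF'
### Ignored

- Internal refactor with no user-facing impact.
EOF
```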
### Releasing
When a new release is made, the "Unreleased" section should be moved to a new section with the release version and the current date. Then a new "Unreleased" section is made at the top of the file with the categories listed above.
**21. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
## Contributor Responsibilities

View File

@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release
## Adding / updating dependencies
1. Add your dependency as you would with go modules. I.e. `go get ...`
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true` to update the bazel managed dependencies.
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the bazel managed dependencies.
Example:
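As a hedged sketch (the dependency below is hypothetical, shown only to illustrate the flow; the gazelle invocation is the one listed above):
```
# Hypothetical dependency added with go modules
go get github.com/pkg/errors@v0.9.1
# Regenerate the bazel-managed dependencies from go.mod
bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true
```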

View File

@@ -2,21 +2,18 @@
This README details how to setup Prysm for interop testing for usage with other Ethereum consensus clients.
> [!IMPORTANT]
> This guide is likely to be outdated. The Prysm team does not have capacity to troubleshoot
> outdated interop guides or instructions. If you experience issues with this guide, please file an
> issue for visibility and propose fixes, if possible.
## Installation & Setup
1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
2. `git clone https://github.com/prysmaticlabs/prysm && cd prysm`
3. `bazel build //cmd/...`
3. `bazel build //...`
## Starting from Genesis
Prysm can be started from a built-in mainnet genesis state, or started with a provided genesis state by
using the `--genesis-state` flag and providing a path to the genesis.ssz file.
Prysm supports a few ways to quickly launch a beacon node from basic configurations:
- `NumValidators + GenesisTime`: Launches a beacon node by deterministically generating a state from a num-validators flag along with a genesis time **(Recommended)**
- `SSZ Genesis`: Launches a beacon node from a .ssz file containing a SSZ-encoded, genesis beacon state
## Generating a Genesis State
@@ -24,34 +21,21 @@ To setup the necessary files for these quick starts, Prysm provides a tool to ge
a deterministically generated set of validator private keys following the official interop YAML format
[here](https://github.com/ethereum/eth2.0-pm/blob/master/interop/mocked_start).
You can use `prysmctl` to create a deterministic genesis state for interop.
You can use `bazel run //tools/genesis-state-gen` to create a deterministic genesis state for interop.
```sh
# Download (or create) a chain config file.
curl https://raw.githubusercontent.com/ethereum/consensus-specs/refs/heads/dev/configs/minimal.yaml -o /tmp/minimal.yaml
### Usage
- **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
- **--num-validators** int: Number of validators to deterministically include in the generated genesis state
- **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state
- **--config-name=interop** string: name of the beacon chain config to use when generating the state. ex mainnet|minimal|interop
The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
and finally writes an ssz-encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section. When using the `--interop-*` flags, the beacon node will assume the `interop` config should be used, unless a different config is specified on the command line.
# Run prysmctl to generate genesis with a 2 minute genesis delay and 256 validators.
bazel run //cmd/prysmctl --config=minimal -- \
testnet generate-genesis \
--genesis-time-delay=120 \
--num-validators=256 \
--output-ssz=/tmp/genesis.ssz \
--chain-config-file=/tmp/minimal.yaml
```
The flags are explained below:
- `bazel run //cmd/prysmctl` is the bazel command to compile and run prysmctl.
- `--config=minimal` is a bazel build time configuration flag to compile Prysm with minimal state constants.
- `--` is an argument divider that tells bazel everything after it should be passed as arguments to prysmctl. Without it, bazel can't tell whether the arguments are build-time arguments or runtime arguments, so the operation complains and fails to build.
- `testnet` is the primary command argument for prysmctl.
- `generate-genesis` is the subcommand to `testnet` in prysmctl.
- `--genesis-time-delay` uint: The number of seconds in the future to define genesis. Example: a value of 60 will set the genesis time to 1 minute in the future. This should be sufficiently large enough to allow for you to start the beacon node before the genesis time.
- `--num-validators` int: Number of validators to deterministically include in the generated genesis state
- `--output-ssz` string: Output filename of the SSZ marshaling of the generated genesis state
- `--chain-config-file` string: Filepath to a chain config yaml file.
Note: This guide saves items to the `/tmp/` directory which will not persist if your machine is
restarted. Consider tweaking the arguments if persistence is needed.
bazel run //tools/genesis-state-gen -- --config-name interop --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
```
## Launching a Beacon Node + Validator Client
@@ -60,33 +44,45 @@ restarted. Consider tweaking the arguments if persistence is needed.
Open up two terminal windows, run:
```
bazel run //cmd/beacon-chain --config=minimal -- \
--minimal-config \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-minimal-devnet \
--force-clear-db \
--min-sync-peers=0 \
--genesis-state=/tmp/genesis.ssz \
--chain-config-file=/tmp/minimal.yaml
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--force-clear-db \
--min-sync-peers=0 \
--interop-num-validators 64 \
--interop-eth1data-votes
```
This will start the system with 256 validators. The flags used can be explained as such:
- `bazel run //cmd/beacon-chain --config=minimal` builds and runs the beacon node in minimal build configuration.
- `--` is a flag divider to distinguish between bazel flags and flags that should be passed to the application. All flags and arguments after this divider are passed to the beacon chain.
- `--minimal-config` tells the beacon node to use minimal network configuration. This is different from the compile time state configuration flag `--config=minimal` and both are required.
- `--bootstrap-node=` disables the default bootstrap nodes. This prevents the client from attempting to peer with mainnet nodes.
- `--datadir=/tmp/beacon-chain-minimal-devnet` sets the data directory in a temporary location. Change this to your preferred destination.
- `--force-clear-db` will delete the beaconchain.db file without confirming with the user. This is helpful for iteratively running local devnets without changing the datadir, but less helpful for one off runs where there was no database in the data directory.
- `--min-sync-peers=0` allows the beacon node to skip initial sync without peers. This is essential because Prysm expects at least a few peers to start the blockchain.
- `--genesis-state=/tmp/genesis.ssz` defines the path to the generated genesis ssz file. The beacon node will use this as the initial genesis state.
- `--chain-config-file=/tmp/minimal.yaml` defines the path to the yaml file with the chain configuration.
As soon as the beacon node has started, start the validator in the other terminal window.
This will deterministically generate a beacon genesis state and start
the system with 64 validators and the genesis time set to the current unix timestamp.
Wait a bit until your beacon chain starts, and in the other window:
```
bazel run //cmd/validator --config=minimal -- --datadir=/tmp/validator --interop-num-validators=256 --minimal-config --suggested-fee-recipient=0x8A04d14125D0FDCDc742F4A05C051De07232EDa4
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
```
This will launch and kickstart the system with your 256 validators performing their duties accordingly.
This will launch and kickstart the system with your 64 validators performing their duties accordingly.
### Launching from `genesis.ssz`
Assuming you generated a `genesis.ssz` file with 64 validators, open up two terminal windows, run:
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--force-clear-db \
--min-sync-peers=0 \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```
Wait a bit until your beacon chain starts, and in the other window:
```
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
```
This will launch and kickstart the system with your 64 validators performing their duties accordingly.

MODULE.bazel.lock (generated, 1689 changed lines)

File diff suppressed because it is too large.

View File

@@ -16,34 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "toolchains_protoc",
sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
strip_prefix = "toolchains_protoc-0.3.6",
url = "https://github.com/aspect-build/toolchains_protoc/releases/download/v0.3.6/toolchains_protoc-v0.3.6.tar.gz",
)
load("@toolchains_protoc//protoc:repositories.bzl", "rules_protoc_dependencies")
rules_protoc_dependencies()
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies")
rules_proto_dependencies()
load("@bazel_features//:deps.bzl", "bazel_features_deps")
bazel_features_deps()
load("@toolchains_protoc//protoc:toolchain.bzl", "protoc_toolchains")
protoc_toolchains(
name = "protoc_toolchains",
version = "v25.3",
)
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
http_archive(
@@ -129,9 +101,9 @@ http_archive(
http_archive(
name = "aspect_bazel_lib",
sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
strip_prefix = "bazel-lib-2.9.3",
url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
strip_prefix = "bazel-lib-2.5.0",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
)
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
@@ -165,10 +137,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "b2038e2de2cace18f032249cb4bb0048abf583a36369fa98f687af1b3f880b26",
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
],
)
@@ -193,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
image = "gcr.io/distroless/cc-debian11",
platforms = [
"linux/amd64",
"linux/arm64/v8",
@@ -210,7 +182,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.23.5",
go_version = "1.21.8",
nogo = "@//:nogo",
)
@@ -255,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.10"
consensus_spec_version = "v1.5.0-alpha.2"
bls_test_version = "v0.1.1"
@@ -271,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -287,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -303,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -318,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -360,30 +332,14 @@ http_archive(
filegroup(
name = "configs",
srcs = [
"metadata/config.yaml",
"custom_config_data/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-b7ZTT+olF+VXEJYNTV5jggNtCkt9dOejm1i2VE+zy+0=",
strip_prefix = "holesky-874c199423ccd180607320c38cbaca05d9a1573a",
url = "https://github.com/eth-clients/holesky/archive/874c199423ccd180607320c38cbaca05d9a1573a.tar.gz", # 2024-06-18
)
http_archive(
name = "sepolia_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"metadata/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-cY/UgpCcYEhQf7JefD65FI8tn/A+rAvKhcm2/qiVdqY=",
strip_prefix = "sepolia-f2c219a93c4491cee3d90c18f2f8e82aed850eab",
url = "https://github.com/eth-clients/sepolia/archive/f2c219a93c4491cee3d90c18f2f8e82aed850eab.tar.gz", # 2024-09-19
sha256 = "5f4be6fd088683ea9db45c863b9c5a1884422449e5b59fd2d561d3ba0f73ffd9",
strip_prefix = "holesky-9d9aabf2d4de51334ee5fed6c79a4d55097d1a43",
url = "https://github.com/eth-clients/holesky/archive/9d9aabf2d4de51334ee5fed6c79a4d55097d1a43.tar.gz", # 2024-01-22
)
http_archive(

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"client.go",
"doc.go",
"health.go",
@@ -15,19 +16,28 @@ go_library(
"//api/client/beacon/iface:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_mod//semver:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"checkpoint_test.go",
"client_test.go",
"health_test.go",
],
@@ -35,7 +45,19 @@ go_test(
deps = [
"//api/client:go_default_library",
"//api/client/beacon/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)

View File

@@ -0,0 +1,276 @@
package beacon
import (
"context"
"fmt"
"path"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.ReadOnlySignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}
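As a hedged illustration of how the files written by SaveState and SaveBlock below are typically consumed (flag names follow Prysm's documented file-based checkpoint sync; the paths are hypothetical):
```
# Start a beacon node from locally saved checkpoint files (paths are illustrative)
bazel run //cmd/beacon-chain -- \
  --checkpoint-state=/tmp/state.ssz \
  --checkpoint-block=/tmp/block.ssz
```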
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
return statePath, file.WriteFile(statePath, o.StateBytes())
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (o *OriginData) StateBytes() []byte {
return o.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
return o.bb
}
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}
// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
// This pair can be used to initialize a new beacon node via checkpoint sync.
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
sb, err := client.GetState(ctx, IdFinalized)
if err != nil {
return nil, err
}
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.WithFields(logrus.Fields{
"name": vu.Config.ConfigName,
"fork": version.String(vu.Fork),
}).Info("Detected supported config in remote finalized state")
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
log.
WithField("blockSlot", b.Block().Slot()).
WithField("stateSlot", s.Slot()).
WithField("stateRoot", hexutil.Encode(sr[:])).
WithField("blockRoot", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
b: b,
sb: sb,
bb: bb,
vu: vu,
br: br,
sr: sr,
}, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch primitives.Epoch
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}
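As a hedged illustration, the string produced by CheckpointString is shaped for a beacon node flag along the following lines (the flag name is assumed from Prysm's weak subjectivity documentation; the value reuses the example from the comment above):
```
bazel run //cmd/beacon-chain -- \
  --weak-subjectivity-checkpoint=0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888
```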
// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return computeBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
return ws, nil
}
const (
prysmMinimumVersion = "v2.0.7"
prysmImplementationName = "Prysm"
)
// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
nv, err := client.GetNodeVersion(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
}
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
}
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
}
// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
sb, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
// compute state and block roots
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}
h := s.LatestBlockHeader()
h.StateRoot = sr[:]
br, err := h.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err = b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}
return &WeakSubjectivityData{
Epoch: epoch,
BlockRoot: br,
StateRoot: sr,
}, nil
}
// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
return 0, err
}
vu, err := detect.FromState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
headState, err := vu.UnmarshalBeaconState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}

View File

@@ -1,4 +1,4 @@
package checkpoint
package beacon
import (
"bytes"
@@ -9,23 +9,37 @@ import (
"net/http"
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
type testRT struct {
rt func(*http.Request) (*http.Response, error)
}
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}
var _ http.RoundTripper = &testRT{}
func marshalToEnvelope(val interface{}) ([]byte, error) {
raw, err := json.Marshal(val)
if err != nil {
@@ -51,6 +65,35 @@ func TestMarshalToEnvelope(t *testing.T) {
require.Equal(t, expected, string(encoded))
}
func TestFallbackVersionCheck(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
func TestFname(t *testing.T) {
vu := &detect.VersionedUnmarshaler{
Config: params.MainnetConfig(),
@@ -119,7 +162,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
expectedWSD := beacon.WeakSubjectivityData{
expectedWSD := WeakSubjectivityData{
BlockRoot: bRoot,
StateRoot: wRoot,
Epoch: epoch,
@@ -128,7 +171,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case beacon.GetWeakSubjectivityPath:
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
@@ -147,10 +190,10 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case beacon.RenderGetStatePath(beacon.IdFromSlot(wSlot)):
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case beacon.RenderGetBlockPath(beacon.IdFromRoot(bRoot)):
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
@@ -158,7 +201,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
return res, nil
}}
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
@@ -222,7 +265,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case beacon.GetNodeVersionPath:
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
@@ -234,15 +277,15 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case beacon.GetWeakSubjectivityPath:
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case beacon.RenderGetStatePath(beacon.IdHead):
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case beacon.RenderGetStatePath(beacon.IdFromSlot(wSlot)):
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case beacon.RenderGetBlockPath(beacon.IdFromRoot(bRoot)):
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
@@ -250,7 +293,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
return res, nil
}}
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
@@ -267,13 +310,13 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == beacon.RenderGetStatePath(beacon.IdHead) {
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
@@ -345,3 +388,96 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
}
return st.SetBalances(balances)
}
func TestDownloadFinalizedData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()
// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
slot, err := slots.EpochStart(epoch)
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, slot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, st.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
sr, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, sr)
require.NoError(t, err)
mb, err := b.MarshalSSZ()
require.NoError(t, err)
br, err := b.Block().HashTreeRoot()
require.NoError(t, err)
ms, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(sb, ms))
vu, err := detect.FromState(sb)
require.NoError(t, err)
us, err := vu.UnmarshalBeaconState(sb)
require.NoError(t, err)
ushtr, err := us.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, sr, ushtr)
expected := &OriginData{
sb: ms,
bb: mb,
br: br,
sr: sr,
}
od, err := DownloadFinalizedData(ctx, c)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
require.Equal(t, expected.br, od.br)
require.Equal(t, expected.sr, od.sr)
}

View File

@@ -29,13 +29,12 @@ const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getConfigSpecPath = "/eth/v1/config/spec"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
GetNodeVersionPath = "/eth/v1/node/version"
GetWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
)
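For context, a hedged sketch of querying one of these endpoints directly (host, port, and response body are illustrative, following the standard Beacon API data envelope):
```
# The node version endpoint parsed by GetNodeVersion below
curl -s http://localhost:3500/eth/v1/node/version
# => {"data":{"version":"Lighthouse/v0.1.5 (Linux x86_64)"}}
```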
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
@@ -81,8 +80,7 @@ func idTemplate(ts string) func(StateOrBlockId) string {
return f
}
// RenderGetBlockPath formats a block id into a path for the GetBlock API endpoint.
func RenderGetBlockPath(id StateOrBlockId) string {
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
@@ -106,7 +104,7 @@ func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := RenderGetBlockPath(blockId)
blockPath := renderGetBlockPath(blockId)
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
@@ -197,10 +195,6 @@ type NodeVersion struct {
systemInfo string
}
func (nv *NodeVersion) SetImplementation(impl string) {
nv.implementation = impl
}
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)
func parseNodeVersion(v string) (*NodeVersion, error) {
@@ -218,7 +212,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.Get(ctx, GetNodeVersionPath)
b, err := c.Get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
@@ -234,8 +228,7 @@ func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
return parseNodeVersion(d.Data.Version)
}
// RenderGetStatePath formats a state id into a path for the GetState API endpoint.
func RenderGetStatePath(id StateOrBlockId) string {
func renderGetStatePath(id StateOrBlockId) string {
return path.Join(getStatePath, string(id))
}
@@ -253,29 +246,13 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
return b, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch primitives.Epoch
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}
// GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm
// This api method does the following:
// - computes weak subjectivity epoch
// - finds the highest non-skipped block preceding the epoch
// - returns the htr of the found block and returns this + the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.Get(ctx, GetWeakSubjectivityPath)
body, err := c.Get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}

View File

@@ -97,31 +97,31 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname with port",
hostArg: "mydomain.org:3500",
path: GetNodeVersionPath,
path: getNodeVersionPath,
joined: "http://mydomain.org:3500/eth/v1/node/version",
},
{
name: "https scheme, hostname with port",
hostArg: "https://mydomain.org:3500",
path: GetNodeVersionPath,
path: getNodeVersionPath,
joined: "https://mydomain.org:3500/eth/v1/node/version",
},
{
name: "http scheme, hostname without port",
hostArg: "http://mydomain.org",
path: GetNodeVersionPath,
path: getNodeVersionPath,
joined: "http://mydomain.org/eth/v1/node/version",
},
{
name: "http scheme, trailing slash, hostname without port",
hostArg: "http://mydomain.org/",
path: GetNodeVersionPath,
path: getNodeVersionPath,
joined: "http://mydomain.org/eth/v1/node/version",
},
{
name: "http scheme, hostname with basic auth creds and no port",
hostArg: "http://username:pass@mydomain.org/",
path: GetNodeVersionPath,
path: getNodeVersionPath,
joined: "http://username:pass@mydomain.org/eth/v1/node/version",
},
}

View File

@@ -48,13 +48,8 @@ func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
if isStatusChanged {
// Update the health status
n.isHealthy = &newStatus
// Send the new status to the health channel, potentially overwriting the existing value
select {
case <-n.healthChan:
n.healthChan <- newStatus
default:
n.healthChan <- newStatus
}
// Send the new status to the health channel
n.healthChan <- newStatus
}
return newStatus
}

View File

@@ -87,6 +87,12 @@ func TestNodeHealth_Concurrency(t *testing.T) {
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
go func() {
for range n.HealthUpdates() {
// Consume values to avoid blocking on channel send.
}
}()
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status

View File

@@ -3,7 +3,6 @@ package testing
import (
"context"
"reflect"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
@@ -17,7 +16,6 @@ var (
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
sync.Mutex
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
@@ -27,8 +25,6 @@ type MockHealthClientMockRecorder struct {
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.Lock()
defer m.Unlock()
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
@@ -45,8 +41,6 @@ func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.Lock()
defer mr.mock.Unlock()
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}

View File

@@ -12,11 +12,8 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -24,15 +21,14 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -46,7 +42,6 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -60,7 +55,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -1,12 +1,12 @@
package builder
import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -22,6 +22,7 @@ type SignedBid interface {
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlobKzgCommitments() ([][]byte, error)
Value() primitives.Wei
Pubkey() []byte
Version() int
@@ -30,18 +31,6 @@ type Bid interface {
HashTreeRootWith(hh *ssz.Hasher) error
}
// BidDeneb is an interface that exposes the newly added KZG commitments on top of the builder bid
type BidDeneb interface {
Bid
BlobKzgCommitments() [][]byte
}
// BidElectra is an interface that exposes the newly added execution requests on top of the builder bid
type BidElectra interface {
BidDeneb
ExecutionRequests() *v1.ExecutionRequests
}
type signedBuilderBid struct {
p *ethpb.SignedBuilderBid
}
@@ -126,6 +115,11 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// BlobKzgCommitments --
func (b builderBid) BlobKzgCommitments() ([][]byte, error) {
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
@@ -175,6 +169,11 @@ func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
}
// BlobKzgCommitments --
func (b builderBidCapella) BlobKzgCommitments() ([][]byte, error) {
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
}
// Version --
func (b builderBidCapella) Version() int {
return version.Capella
@@ -255,8 +254,8 @@ func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
}
// BlobKzgCommitments --
func (b builderBidDeneb) BlobKzgCommitments() [][]byte {
return b.p.BlobKzgCommitments
func (b builderBidDeneb) BlobKzgCommitments() ([][]byte, error) {
return b.p.BlobKzgCommitments, nil
}
type signedBuilderBidDeneb struct {
@@ -291,95 +290,3 @@ func (b signedBuilderBidDeneb) Version() int {
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}
type builderBidElectra struct {
p *ethpb.BuilderBidElectra
}
// WrappedBuilderBidElectra is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidElectra(p *ethpb.BuilderBidElectra) (Bid, error) {
w := builderBidElectra{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Version --
func (b builderBidElectra) Version() int {
return version.Electra
}
// Value --
func (b builderBidElectra) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
func (b builderBidElectra) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidElectra) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidElectra) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidElectra) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
// Header --
func (b builderBidElectra) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
}
// ExecutionRequests --
func (b builderBidElectra) ExecutionRequests() *v1.ExecutionRequests {
return b.p.ExecutionRequests // does not copy
}
// BlobKzgCommitments --
func (b builderBidElectra) BlobKzgCommitments() [][]byte {
return b.p.BlobKzgCommitments
}
type signedBuilderBidElectra struct {
p *ethpb.SignedBuilderBidElectra
}
// WrappedSignedBuilderBidElectra is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidElectra(p *ethpb.SignedBuilderBidElectra) (SignedBid, error) {
w := signedBuilderBidElectra{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidElectra) Message() (Bid, error) {
return WrappedBuilderBidElectra(b.p.Message)
}
// Signature --
func (b signedBuilderBidElectra) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidElectra) Version() int {
return version.Electra
}
// IsNil --
func (b signedBuilderBidElectra) IsNil() bool {
return b.p == nil
}
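
This file contrasts two ways of exposing fork-specific bid fields: an error-returning BlobKzgCommitments method on every Bid, or narrower BidDeneb/BidElectra extension interfaces that callers reach with a type assertion. A minimal sketch of the type-assertion style, using stand-in types rather than Prysm's real ones:

package main

import "fmt"

// Bid is a trimmed-down stand-in for the builder bid interface.
type Bid interface {
	Version() int
}

// BidDeneb is the extension-interface style: new fields live on a separate
// interface and callers discover them with a type assertion.
type BidDeneb interface {
	Bid
	BlobKzgCommitments() [][]byte
}

type denebBid struct{ commitments [][]byte }

func (denebBid) Version() int                   { return 4 } // arbitrary fork number for the sketch
func (b denebBid) BlobKzgCommitments() [][]byte { return b.commitments }

func commitmentCount(b Bid) int {
	// Pre-Deneb bids simply do not implement BidDeneb, so no
	// "not available before Deneb" error value is needed.
	if d, ok := b.(BidDeneb); ok {
		return len(d.BlobKzgCommitments())
	}
	return 0
}

func main() {
	fmt.Println(commitmentCount(denebBid{commitments: [][]byte{make([]byte, 48)}})) // prints: 1
}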

View File

@@ -14,17 +14,16 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const (
@@ -147,17 +146,13 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
u := c.baseURL.ResolveReference(&url.URL{Path: path})
span.SetAttributes(trace.StringAttribute("url", u.String()),
span.AddAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return
}
if method == http.MethodPost {
req.Header.Set("Content-Type", api.JsonMediaType)
}
req.Header.Set("Accept", api.JsonMediaType)
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
@@ -181,7 +176,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
err = non200Err(r)
return
}
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
res, err = io.ReadAll(r.Body)
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return
@@ -223,23 +218,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
if err := json.Unmarshal(hb, v); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
ver, err := version.FromString(strings.ToLower(v.Version))
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", strings.ToLower(v.Version)))
}
if ver >= version.Electra {
hr := &ExecHeaderResponseElectra{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidElectra(p)
}
if ver >= version.Deneb {
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -249,8 +229,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
}
if ver >= version.Capella {
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -260,8 +239,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidCapella(p)
}
if ver >= version.Bellatrix {
case strings.ToLower(version.String(version.Bellatrix)):
hr := &ExecHeaderResponse{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -271,8 +249,9 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrap(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBid(p)
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
@@ -280,7 +259,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.SetAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
if len(svr) == 0 {
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
@@ -299,11 +278,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
if err != nil {
return err
}
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
return nil
return err
}
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
@@ -379,7 +354,7 @@ func (c *Client) Status(ctx context.Context) error {
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
bodyBytes, err := io.ReadAll(response.Body)
var errMessage ErrorMessage
var body string
if err != nil {

View File

@@ -12,11 +12,11 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -90,8 +90,6 @@ func TestClient_RegisterValidator(t *testing.T) {
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
body, err := io.ReadAll(r.Body)
defer func() {
require.NoError(t, r.Body.Close())
@@ -126,7 +124,7 @@ func TestClient_RegisterValidator(t *testing.T) {
func TestClient_GetHeader(t *testing.T) {
ctx := context.Background()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot primitives.Slot = 23
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
@@ -269,9 +267,9 @@ func TestClient_GetHeader(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
dbid, ok := bid.(builderBidDeneb)
require.Equal(t, true, ok)
kcgCommitments := dbid.BlobKzgCommitments()
kcgCommitments, err := bid.BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(kcgCommitments) > 0, true)
for i := range kcgCommitments {
require.Equal(t, len(kcgCommitments[i]) == 48, true)
@@ -295,50 +293,6 @@ func TestClient_GetHeader(t *testing.T) {
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
})
t.Run("electra", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseElectra)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
ebid, ok := bid.(builderBidElectra)
require.Equal(t, true, ok)
kcgCommitments := ebid.BlobKzgCommitments()
require.Equal(t, len(kcgCommitments) > 0, true)
for i := range kcgCommitments {
require.Equal(t, len(kcgCommitments[i]) == 48, true)
}
requests := ebid.ExecutionRequests()
require.Equal(t, 1, len(requests.Deposits))
require.Equal(t, 1, len(requests.Withdrawals))
require.Equal(t, 1, len(requests.Consolidations))
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -367,8 +321,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -395,8 +349,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -416,7 +370,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
@@ -426,8 +380,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
@@ -455,7 +409,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
require.NotNil(t, blobBundle)

View File

@@ -5,15 +5,12 @@ import (
"fmt"
"math/big"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
@@ -416,10 +413,54 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
}, nil
}
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
pb := ed.Proto()
var data interface{}
var err error
var ver string
switch pbStruct := pb.(type) {
case *v1.ExecutionPayload:
ver = version.String(version.Bellatrix)
data, err = FromProto(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
}
case *v1.ExecutionPayloadCapella:
ver = version.String(version.Capella)
data, err = FromProtoCapella(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
}
case *v1.ExecutionPayloadDeneb:
ver = version.String(version.Deneb)
payloadStruct, err := FromProtoDeneb(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
}
data = &ExecutionPayloadDenebAndBlobsBundle{
ExecutionPayload: &payloadStruct,
BlobsBundle: FromBundleProto(bundle),
}
default:
return nil, errInvalidTypeConversion
}
encoded, err := json.Marshal(data)
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
}
return &ExecutionPayloadResponse{
Version: ver,
Data: encoded,
}, nil
}
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
type ExecHeaderResponseCapella struct {
Version string `json:"version"`
Data struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidCapella `json:"message"`
} `json:"data"`
@@ -563,25 +604,17 @@ type BlobBundler interface {
BundleProto() (*v1.BlobsBundle, error)
}
// ParsedExecutionRequests can retrieve the underlying execution requests for the given execution payload response.
type ParsedExecutionRequests interface {
ExecutionRequestsProto() (*v1.ExecutionRequests, error)
}
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
var toProto ParsedPayload
v, err := version.FromString(strings.ToLower(r.Version))
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("unsupported version %s", strings.ToLower(r.Version)))
}
if v >= version.Deneb {
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
} else if v >= version.Capella {
toProto = &ExecutionPayloadCapella{}
} else if v >= version.Bellatrix {
switch r.Version {
case version.String(version.Bellatrix):
toProto = &ExecutionPayload{}
} else {
return nil, fmt.Errorf("unsupported version %s", strings.ToLower(r.Version))
case version.String(version.Capella):
toProto = &ExecutionPayloadCapella{}
case version.String(version.Deneb):
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
default:
return nil, consensusblocks.ErrUnsupportedVersion
}
if err := json.Unmarshal(r.Data, toProto); err != nil {
@@ -956,8 +989,7 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseDeneb struct {
Version string `json:"version"`
Data struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidDeneb `json:"message"`
} `json:"data"`
@@ -981,7 +1013,7 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
if err != nil {
return nil, err
}
if len(bb.BlobKzgCommitments) > params.BeaconConfig().DeprecatedMaxBlobsPerBlock {
if len(bb.BlobKzgCommitments) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("too many blob commitments: %d", len(bb.BlobKzgCommitments))
}
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
@@ -1274,208 +1306,6 @@ func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
}, nil
}
// ExecHeaderResponseElectra is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseElectra struct {
Version string `json:"version"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidElectra `json:"message"`
} `json:"data"`
}
// ToProto creates a SignedBuilderBidElectra Proto from ExecHeaderResponseElectra.
func (ehr *ExecHeaderResponseElectra) ToProto() (*eth.SignedBuilderBidElectra, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidElectra{
Message: bb,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
}, nil
}
// ToProto creates a BuilderBidElectra Proto from BuilderBidElectra.
func (bb *BuilderBidElectra) ToProto() (*eth.BuilderBidElectra, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
if len(bb.BlobKzgCommitments) > params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra) {
return nil, fmt.Errorf("blob commitment count %d exceeds the maximum %d", len(bb.BlobKzgCommitments), params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra))
}
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
for i, commit := range bb.BlobKzgCommitments {
if len(commit) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("commitment length %d is not %d", len(commit), fieldparams.BLSPubkeyLength)
}
kzgCommitments[i] = bytesutil.SafeCopyBytes(commit)
}
// Post-Electra execution requests should not be nil; if no requests exist, use an empty request.
if bb.ExecutionRequests == nil {
return nil, errors.New("bid contains nil execution requests")
}
executionRequests, err := bb.ExecutionRequests.ToProto()
if err != nil {
return nil, errors.Wrap(err, "failed to convert ExecutionRequests")
}
return &eth.BuilderBidElectra{
Header: header,
BlobKzgCommitments: kzgCommitments,
ExecutionRequests: executionRequests,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian; SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
// ExecutionRequestsV1 is a wrapper for different execution requests
type ExecutionRequestsV1 struct {
Deposits []*DepositRequestV1 `json:"deposits"`
Withdrawals []*WithdrawalRequestV1 `json:"withdrawals"`
Consolidations []*ConsolidationRequestV1 `json:"consolidations"`
}
func (er *ExecutionRequestsV1) ToProto() (*v1.ExecutionRequests, error) {
if uint64(len(er.Deposits)) > params.BeaconConfig().MaxDepositRequestsPerPayload {
return nil, fmt.Errorf("deposit requests count %d exceeds the maximum %d", len(er.Deposits), params.BeaconConfig().MaxDepositRequestsPerPayload)
}
deposits := make([]*v1.DepositRequest, len(er.Deposits))
for i, dep := range er.Deposits {
d, err := dep.ToProto()
if err != nil {
return nil, err
}
deposits[i] = d
}
if uint64(len(er.Withdrawals)) > params.BeaconConfig().MaxWithdrawalRequestsPerPayload {
return nil, fmt.Errorf("withdrawal requests count %d exceeds the maximum %d", len(er.Withdrawals), params.BeaconConfig().MaxWithdrawalRequestsPerPayload)
}
withdrawals := make([]*v1.WithdrawalRequest, len(er.Withdrawals))
for i, wr := range er.Withdrawals {
w, err := wr.ToProto()
if err != nil {
return nil, err
}
withdrawals[i] = w
}
if uint64(len(er.Consolidations)) > params.BeaconConfig().MaxConsolidationsRequestsPerPayload {
return nil, fmt.Errorf("consolidation requests count %d exceeds the maximum %d", len(er.Consolidations), params.BeaconConfig().MaxConsolidationsRequestsPerPayload)
}
consolidations := make([]*v1.ConsolidationRequest, len(er.Consolidations))
for i, con := range er.Consolidations {
c, err := con.ToProto()
if err != nil {
return nil, err
}
consolidations[i] = c
}
return &v1.ExecutionRequests{
Deposits: deposits,
Withdrawals: withdrawals,
Consolidations: consolidations,
}, nil
}
// BuilderBidElectra is a field of ExecHeaderResponseElectra.
type BuilderBidElectra struct {
Header *ExecutionPayloadHeaderDeneb `json:"header"`
BlobKzgCommitments []hexutil.Bytes `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequestsV1 `json:"execution_requests"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// WithdrawalRequestV1 is a field of ExecutionRequestsV1.
type WithdrawalRequestV1 struct {
SourceAddress hexutil.Bytes `json:"source_address"`
ValidatorPubkey hexutil.Bytes `json:"validator_pubkey"`
Amount Uint256 `json:"amount"`
}
func (wr *WithdrawalRequestV1) ToProto() (*v1.WithdrawalRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(wr.SourceAddress.String(), common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "source_address")
}
pubkey, err := bytesutil.DecodeHexWithLength(wr.ValidatorPubkey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "validator_pubkey")
}
return &v1.WithdrawalRequest{
SourceAddress: srcAddress,
ValidatorPubkey: pubkey,
Amount: wr.Amount.Uint64(),
}, nil
}
// DepositRequestV1 is a field of ExecutionRequestsV1.
type DepositRequestV1 struct {
PubKey hexutil.Bytes `json:"pubkey"`
// withdrawalCredentials: DATA, 32 Bytes
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
// amount: QUANTITY, 64 Bits
Amount Uint256 `json:"amount"`
// signature: DATA, 96 Bytes
Signature hexutil.Bytes `json:"signature"`
// index: QUANTITY, 64 Bits
Index Uint256 `json:"index"`
}
func (dr *DepositRequestV1) ToProto() (*v1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(dr.PubKey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "pubkey")
}
wc, err := bytesutil.DecodeHexWithLength(dr.WithdrawalCredentials.String(), fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "withdrawal_credentials")
}
sig, err := bytesutil.DecodeHexWithLength(dr.Signature.String(), fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "signature")
}
return &v1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: wc,
Amount: dr.Amount.Uint64(),
Signature: sig,
Index: dr.Index.Uint64(),
}, nil
}
// ConsolidationRequestV1 is a field of ExecutionRequestsV1.
type ConsolidationRequestV1 struct {
// sourceAddress: DATA, 20 Bytes
SourceAddress hexutil.Bytes `json:"source_address"`
// sourcePubkey: DATA, 48 Bytes
SourcePubkey hexutil.Bytes `json:"source_pubkey"`
// targetPubkey: DATA, 48 Bytes
TargetPubkey hexutil.Bytes `json:"target_pubkey"`
}
func (cr *ConsolidationRequestV1) ToProto() (*v1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(cr.SourceAddress.String(), common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "source_address")
}
sourcePubkey, err := bytesutil.DecodeHexWithLength(cr.SourcePubkey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "source_pubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(cr.TargetPubkey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "target_pubkey")
}
return &v1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: sourcePubkey,
TargetPubkey: targetPubkey,
}, nil
}
// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`

View File

@@ -12,7 +12,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -154,64 +153,6 @@ var testExampleHeaderResponseDeneb = `{
}
}`
var testExampleHeaderResponseElectra = `{
"version": "electra",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "1",
"blob_gas_used": "1",
"excess_blob_gas": "1",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"blob_kzg_commitments": [
"0xa94170080872584e54a1cf092d845703b13907f2e6b3b1c0ad573b910530499e3bcd48c6378846b80d2bfa58c81cf3d5"
],
"execution_requests": {
"deposits": [
{
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
"withdrawal_credentials": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"amount": "1",
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
"index": "1"
}
],
"withdrawals": [
{
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"validator_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
"amount": "1"
}
],
"consolidations": [
{
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"source_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
"target_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
}
]
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
var testExampleHeaderResponseDenebNoBundle = `{
"version": "deneb",
"data": {
@@ -1973,8 +1914,7 @@ func TestEmptyResponseBody(t *testing.T) {
var b []byte
r := &ExecutionPayloadResponse{}
err := json.Unmarshal(b, r)
var syntaxError *json.SyntaxError
ok := errors.As(err, &syntaxError)
_, ok := err.(*json.SyntaxError)
require.Equal(t, true, ok)
})
t.Run("empty object", func(t *testing.T) {
@@ -1982,9 +1922,9 @@ func TestEmptyResponseBody(t *testing.T) {
emptyResponse := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal(empty, emptyResponse))
_, err := emptyResponse.ParsePayload()
require.ErrorContains(t, "unsupported version", err)
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
})
versions := []int{version.Bellatrix, version.Capella, version.Deneb, version.Electra}
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
for i := range versions {
vstr := version.String(versions[i])
t.Run("populated version without payload"+vstr, func(t *testing.T) {

View File

@@ -10,18 +10,11 @@ import (
"github.com/pkg/errors"
)
const (
MaxBodySize int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
MaxBodySizeState int64 = 1 << 29 // 512MB
MaxErrBodySize int64 = 1 << 17 // 128KB
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
maxBodySize int64
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
@@ -33,9 +26,8 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
maxBodySize: MaxBodySize,
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
@@ -80,7 +72,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
@@ -97,7 +89,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
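
The hunks above switch between io.ReadAll(r.Body) and a read bounded by io.LimitReader with a configurable maxBodySize. A small self-contained sketch of the bounded read, assuming an 8MB cap like the MaxBodySize constant above:

package main

import (
	"fmt"
	"io"
	"strings"
)

const maxBodySize int64 = 1 << 23 // 8MB, mirroring the MaxBodySize constant above

// readBounded stops after maxBodySize bytes, so a misbehaving server cannot
// make the client buffer an arbitrarily large response body.
func readBounded(r io.Reader) ([]byte, error) {
	return io.ReadAll(io.LimitReader(r, maxBodySize))
}

func main() {
	b, err := readBounded(strings.NewReader("small response"))
	fmt.Println(string(b), err) // prints: small response <nil>
}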

View File

@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err parses an HTTP response that is not a 200 and returns a formatted error.
func Non200Err(r *http.Response) error {
b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(b)
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
switch r.StatusCode {
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case http.StatusNotFound:
return errors.Wrap(ErrNotFound, msg)
default:

View File

@@ -2,10 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"event_stream.go",
"utils.go",
],
srcs = ["event_stream.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
visibility = ["//visibility:public"],
deps = [
@@ -18,10 +15,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"event_stream_test.go",
"utils_test.go",
],
srcs = ["event_stream_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",

View File

@@ -93,7 +93,6 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
return
}
defer func() {
@@ -103,8 +102,6 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
}()
// Create a new scanner to read lines from the response body
scanner := bufio.NewScanner(resp.Body)
// Set the split function for the scanning operation
scanner.Split(scanLinesWithCarriage)
var eventType, data string // Variables to store event type and data
@@ -116,7 +113,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
close(eventsChannel)
return
default:
line := scanner.Text()
line := scanner.Text() // TODO(13730): scanner does not handle \r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event

View File

@@ -40,12 +40,11 @@ func TestNewEventStream(t *testing.T) {
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 3; i++ {
events := [3]string{"event: head\ndata: data%d\n\n", "event: head\rdata: data%d\r\r", "event: head\r\ndata: data%d\r\n\r\n"}
_, err := fmt.Fprintf(w, events[i-1], i)
for i := 1; i <= 2; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
require.NoError(t, err)
flusher.Flush() // Trigger flush to simulate streaming data
time.Sleep(100 * time.Millisecond) // Simulate delay between events
@@ -63,7 +62,7 @@ func TestEventStream(t *testing.T) {
// Collect events
var events []*Event
for len(events) != 3 {
for len(events) != 2 {
select {
case event := <-eventsChannel:
log.Info(event)
@@ -72,30 +71,10 @@ func TestEventStream(t *testing.T) {
}
// Assertions to verify the events content
expectedData := []string{"data1", "data2", "data3"}
expectedData := []string{"data1", "data2"}
for i, event := range events {
if string(event.Data) != expectedData[i] {
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
}
}
}
func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Use a valid URL that will result in a failed request with a nil body.
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
require.NoError(t, err)
// The error will happen when the request is made and should be received over the events channel.
go stream.Subscribe(eventsChannel)
event := <-eventsChannel
if event.EventType != EventConnectionError {
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
}
}

View File

@@ -1,36 +0,0 @@
package event
import (
"bytes"
)
// Adapted from bufio.ScanLines to handle carriage return characters as line separators.
func scanLinesWithCarriage(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i, j := bytes.IndexByte(data, '\n'), bytes.IndexByte(data, '\r'); i >= 0 || j >= 0 {
in := i
// Split at the earlier of \n and \r; for a \r\n pair, split at the \n so both bytes are consumed together.
if i < 0 || (i > j && i != j+1 && j >= 0) {
in = j
}
// We have a full newline-terminated line.
return in + 1, dropCR(data[0:in]), nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), dropCR(data), nil
}
// Request more data.
return 0, nil, nil
}
// dropCR drops a terminal \r from the data.
func dropCR(data []byte) []byte {
if len(data) > 0 && data[len(data)-1] == '\r' {
return data[0 : len(data)-1]
}
return data
}
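
scanLinesWithCarriage above is a bufio.SplitFunc, so the call site only needs scanner.Split before the read loop. A short sketch of that wiring; it assumes scanLinesWithCarriage from the file above is in the same package, so it is not fully standalone:

package event

import (
	"bufio"
	"fmt"
	"strings"
)

// demoScanCarriage shows the split function handling \r-terminated SSE lines.
func demoScanCarriage() {
	scanner := bufio.NewScanner(strings.NewReader("event: head\rdata: data1\r\r"))
	scanner.Split(scanLinesWithCarriage) // treat \n, \r and \r\n as line terminators
	for scanner.Scan() {
		fmt.Printf("%q\n", scanner.Text()) // "event: head", "data: data1", ""
	}
}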

View File

@@ -1,97 +0,0 @@
package event
import (
"bufio"
"bytes"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestScanLinesWithCarriage(t *testing.T) {
testCases := []struct {
name string
input string
expected []string
}{
{
name: "LF line endings",
input: "line1\nline2\nline3",
expected: []string{"line1", "line2", "line3"},
},
{
name: "CR line endings",
input: "line1\rline2\rline3",
expected: []string{"line1", "line2", "line3"},
},
{
name: "CRLF line endings",
input: "line1\r\nline2\r\nline3",
expected: []string{"line1", "line2", "line3"},
},
{
name: "Mixed line endings",
input: "line1\nline2\rline3\r\nline4",
expected: []string{"line1", "line2", "line3", "line4"},
},
{
name: "Empty lines",
input: "line1\n\nline2\r\rline3",
expected: []string{"line1", "", "line2", "", "line3"},
},
{
name: "Empty lines 2",
input: "line1\n\rline2\n\rline3",
expected: []string{"line1", "", "line2", "", "line3"},
},
{
name: "No line endings",
input: "single line without ending",
expected: []string{"single line without ending"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
scanner := bufio.NewScanner(bytes.NewReader([]byte(tc.input)))
scanner.Split(scanLinesWithCarriage)
var lines []string
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
require.NoError(t, scanner.Err())
require.Equal(t, len(tc.expected), len(lines), "Number of lines does not match")
for i, line := range lines {
require.Equal(t, tc.expected[i], line, "Line %d does not match", i)
}
})
}
}
// TestScanLinesWithCarriageEdgeCases tests edge cases and potential error scenarios
func TestScanLinesWithCarriageEdgeCases(t *testing.T) {
t.Run("Empty input", func(t *testing.T) {
scanner := bufio.NewScanner(bytes.NewReader([]byte("")))
scanner.Split(scanLinesWithCarriage)
require.Equal(t, scanner.Scan(), false)
require.NoError(t, scanner.Err())
})
t.Run("Very long line", func(t *testing.T) {
longLine := bytes.Repeat([]byte("a"), bufio.MaxScanTokenSize+1)
scanner := bufio.NewScanner(bytes.NewReader(longLine))
scanner.Split(scanLinesWithCarriage)
require.Equal(t, scanner.Scan(), false)
require.NotNil(t, scanner.Err())
})
t.Run("Line ending at max token size", func(t *testing.T) {
input := append(bytes.Repeat([]byte("a"), bufio.MaxScanTokenSize-1), '\n')
scanner := bufio.NewScanner(bytes.NewReader(input))
scanner.Split(scanLinesWithCarriage)
require.Equal(t, scanner.Scan(), true)
require.Equal(t, string(bytes.Repeat([]byte("a"), bufio.MaxScanTokenSize-1)), scanner.Text())
})
}

View File

@@ -46,10 +46,3 @@ func WithAuthenticationToken(token string) ClientOpt {
c.token = token
}
}
// WithMaxBodySize overrides the default max body size of 8MB.
func WithMaxBodySize(size int64) ClientOpt {
return func(c *Client) {
c.maxBodySize = size
}
}
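
WithMaxBodySize above is one more functional option on the client constructor. A hedged usage sketch against the variant of the package that includes this option; the host is a placeholder:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
	// Allow responses larger than the 8MB default, e.g. for endpoints returning full states.
	c, err := client.NewClient("http://localhost:3500",
		client.WithMaxBodySize(client.MaxBodySizeState))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.NodeURL())
}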

api/gateway/BUILD.bazel Normal file
View File

@@ -0,0 +1,43 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"gateway.go",
"log.go",
"modifiers.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/gateway",
visibility = [
"//beacon-chain:__subpackages__",
"//validator:__subpackages__",
],
deps = [
"//api/server:go_default_library",
"//runtime:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["gateway_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/beacon-chain/flags:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

api/gateway/gateway.go Normal file
View File

@@ -0,0 +1,212 @@
// Package gateway defines a grpc-gateway server that serves HTTP-JSON traffic and acts as a proxy between HTTP and gRPC.
package gateway
import (
"context"
"fmt"
"net"
"net/http"
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
"github.com/prysmaticlabs/prysm/v5/runtime"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
var _ runtime.Service = (*Gateway)(nil)
// PbMux serves grpc-gateway requests for selected patterns using registered protobuf handlers.
type PbMux struct {
Registrations []PbHandlerRegistration // Protobuf registrations to be registered in Mux.
Patterns []string // URL patterns that will be handled by Mux.
Mux *gwruntime.ServeMux // The router that will be used for grpc-gateway requests.
}
// PbHandlerRegistration is a function that registers a protobuf handler.
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
// MuxHandler is a function that implements the mux handler functionality.
type MuxHandler func(
h http.HandlerFunc,
w http.ResponseWriter,
req *http.Request,
)
// Config parameters for setting up the gateway service.
type config struct {
maxCallRecvMsgSize uint64
remoteCert string
gatewayAddr string
remoteAddr string
allowedOrigins []string
muxHandler MuxHandler
pbHandlers []*PbMux
router *mux.Router
timeout time.Duration
}
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
type Gateway struct {
cfg *config
conn *grpc.ClientConn
server *http.Server
cancel context.CancelFunc
ctx context.Context
startFailure error
}
// New returns a new instance of the Gateway.
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
g := &Gateway{
ctx: ctx,
cfg: &config{},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
g.cfg.router = mux.NewRouter()
}
return g, nil
}
// Start the gateway service.
func (g *Gateway) Start() {
ctx, cancel := context.WithCancel(g.ctx)
g.cancel = cancel
conn, err := g.dial(ctx, "tcp", g.cfg.remoteAddr)
if err != nil {
log.WithError(err).Error("Failed to connect to gRPC server")
g.startFailure = err
return
}
g.conn = conn
for _, h := range g.cfg.pbHandlers {
for _, r := range h.Registrations {
if err := r(ctx, h.Mux, g.conn); err != nil {
log.WithError(err).Error("Failed to register handler")
g.startFailure = err
return
}
}
for _, p := range h.Patterns {
g.cfg.router.PathPrefix(p).Handler(h.Mux)
}
}
corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
if g.cfg.muxHandler != nil {
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.cfg.muxHandler(corsMux.ServeHTTP, w, r)
})
}
g.server = &http.Server{
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
ReadHeaderTimeout: time.Second,
}
go func() {
log.WithField("address", g.cfg.gatewayAddr).Info("Starting gRPC gateway")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start gRPC gateway")
g.startFailure = err
return
}
}()
}
// Status of grpc gateway. Returns an error if this service is unhealthy.
func (g *Gateway) Status() error {
if g.startFailure != nil {
return g.startFailure
}
if s := g.conn.GetState(); s != connectivity.Ready {
return fmt.Errorf("grpc server is %s", s)
}
return nil
}
// Stop the gateway with a graceful shutdown.
func (g *Gateway) Stop() error {
if g.server != nil {
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
defer shutdownCancel()
if err := g.server.Shutdown(shutdownCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("Existing connections terminated")
} else {
log.WithError(err).Error("Failed to gracefully shut down server")
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
// dial the gRPC server.
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
switch network {
case "tcp":
return g.dialTCP(ctx, addr)
case "unix":
return g.dialUnix(ctx, addr)
default:
return nil, fmt.Errorf("unsupported network type %q", network)
}
}
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
var security grpc.DialOption
if len(g.cfg.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
if err != nil {
return nil, err
}
security = grpc.WithTransportCredentials(creds)
} else {
// Use insecure credentials when there's no remote cert provided.
security = grpc.WithTransportCredentials(insecure.NewCredentials())
}
opts := []grpc.DialOption{
security,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
// dialUnix creates a client connection via a unix domain socket.
// "addr" must be a valid path to the socket.
func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn, error) {
d := func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
}
f := func(ctx context.Context, addr string) (net.Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
return d(addr, time.Until(deadline))
}
return d(addr, 0)
}
opts := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(f),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}

api/gateway/gateway_test.go Normal file
View File

@@ -0,0 +1,107 @@
package gateway
import (
"context"
"flag"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func TestGateway_Customized(t *testing.T) {
r := mux.NewRouter()
cert := "cert"
origins := []string{"origin"}
size := uint64(100)
opts := []Option{
WithRouter(r),
WithRemoteCert(cert),
WithAllowedOrigins(origins),
WithMaxCallRecvMsgSize(size),
WithMuxHandler(func(
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
assert.Equal(t, r, g.cfg.router)
assert.Equal(t, cert, g.cfg.remoteCert)
require.Equal(t, 1, len(g.cfg.allowedOrigins))
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
}
func TestGateway_StartStop(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
WithMuxHandler(func(
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting gRPC gateway")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
writer := httptest.NewRecorder()
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}

api/gateway/log.go Normal file
View File

@@ -0,0 +1,5 @@
package gateway
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "gateway")

api/gateway/modifiers.go Normal file
View File

@@ -0,0 +1,30 @@
package gateway
import (
"context"
"net/http"
"strconv"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/protobuf/proto"
)
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
md, ok := gwruntime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
// set http status code
if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
code, err := strconv.Atoi(vals[0])
if err != nil {
return err
}
// delete the headers to not expose any grpc-metadata in http response
delete(md.HeaderMD, "x-http-code")
delete(w.Header(), "Grpc-Metadata-X-Http-Code")
w.WriteHeader(code)
}
return nil
}
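
HttpResponseModifier only reacts to an x-http-code metadata header, so a gRPC handler has to set that header for a non-default HTTP status to reach the HTTP client. A sketch of the producing side; the package and helper name are illustrative, only grpc.SetHeader and the header key come from the code above:

package handlers

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// setHTTPStatus asks the gateway to translate this gRPC response into the
// given HTTP status code; HttpResponseModifier reads and then strips the
// x-http-code header before the response reaches the HTTP client.
func setHTTPStatus(ctx context.Context, code string) error {
	return grpc.SetHeader(ctx, metadata.Pairs("x-http-code", code))
}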

api/gateway/options.go Normal file
View File

@@ -0,0 +1,79 @@
package gateway
import (
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)
type Option func(g *Gateway) error
func WithPbHandlers(handlers []*PbMux) Option {
return func(g *Gateway) error {
g.cfg.pbHandlers = handlers
return nil
}
}
func WithMuxHandler(m MuxHandler) Option {
return func(g *Gateway) error {
g.cfg.muxHandler = m
return nil
}
}
func WithGatewayAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.gatewayAddr = addr
return nil
}
}
func WithRemoteAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.remoteAddr = addr
return nil
}
}
// WithRouter allows adding a custom mux router to the gateway.
func WithRouter(r *mux.Router) Option {
return func(g *Gateway) error {
g.cfg.router = r
return nil
}
}
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
func WithAllowedOrigins(origins []string) Option {
return func(g *Gateway) error {
g.cfg.allowedOrigins = origins
return nil
}
}
// WithRemoteCert allows adding a custom certificate to the gateway.
func WithRemoteCert(cert string) Option {
return func(g *Gateway) error {
g.cfg.remoteCert = cert
return nil
}
}
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
func WithMaxCallRecvMsgSize(size uint64) Option {
return func(g *Gateway) error {
g.cfg.maxCallRecvMsgSize = size
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
return func(g *Gateway) error {
g.cfg.timeout = time.Second * time.Duration(seconds)
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
return nil
}
}
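A sketch of how these options compose when constructing the gateway; New and Gateway are the constructor and type exercised in the tests above, a context import is assumed, and the addresses, origin list, message size, and timeout are placeholders:
// buildExampleGateway is an illustrative helper, not part of the package above.
func buildExampleGateway(ctx context.Context) (*Gateway, error) {
    return New(ctx,
        WithGatewayAddr("127.0.0.1:3500"),
        WithRemoteAddr("127.0.0.1:4000"),
        WithRouter(mux.NewRouter()),
        WithAllowedOrigins([]string{"*"}),
        WithMaxCallRecvMsgSize(1<<22), // 4 MiB
        WithTimeout(10),               // seconds, also applied to gwruntime.DefaultContextTimeout
    )
}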

View File

@@ -22,7 +22,9 @@ go_test(
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)

View File

@@ -2,6 +2,8 @@ package grpc
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
@@ -74,8 +76,21 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
continue
}
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "="))
}
}
return parent
}
// AppendCustomErrorHeader sets a CustomErrorMetadataKey gRPC header on the passed in context,
// using the passed in error data as the header's value. The data is serialized as JSON.
func AppendCustomErrorHeader(ctx context.Context, errorData interface{}) error {
j, err := json.Marshal(errorData)
if err != nil {
return fmt.Errorf("could not marshal error data into JSON: %w", err)
}
if err := grpc.SetHeader(ctx, metadata.Pairs(CustomErrorMetadataKey, string(j))); err != nil {
return fmt.Errorf("could not set custom error header: %w", err)
}
return nil
}
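A short sketch of the producer side in the same grpc package; the error shape mirrors the customErrorData struct used in the test further down and is otherwise illustrative:
// replyWithCustomError attaches structured error details that the gateway
// exposes to HTTP clients under CustomErrorMetadataKey.
func replyWithCustomError(ctx context.Context) error {
    errData := struct {
        Message string `json:"message"`
    }{Message: "payload failed validation"}
    return AppendCustomErrorHeader(ctx, errData)
}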

View File

@@ -2,11 +2,15 @@ package grpc
import (
"context"
"encoding/json"
"strings"
"testing"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
@@ -58,3 +62,17 @@ func TestAppendHeaders(t *testing.T) {
assert.Equal(t, "value=1", md.Get("first")[0])
})
}
func TestAppendCustomErrorHeader(t *testing.T) {
stream := &runtime.ServerTransportStream{}
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
data := &customErrorData{Message: "foo"}
require.NoError(t, AppendCustomErrorHeader(ctx, data))
// The stream used in test setup sets the metadata key in lowercase.
value, ok := stream.Header()[strings.ToLower(CustomErrorMetadataKey)]
require.Equal(t, true, ok, "Failed to retrieve custom error metadata value")
expected, err := json.Marshal(data)
require.NoError(t, err)
assert.Equal(t, string(expected), value[0])
}

View File

@@ -1,7 +1,5 @@
package api
import "net/http"
const (
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
@@ -12,9 +10,3 @@ const (
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
)
// SetSSEHeaders sets the headers needed for a server-sent event response.
func SetSSEHeaders(w http.ResponseWriter) {
w.Header().Set("Content-Type", EventStreamMediaType)
w.Header().Set("Connection", KeepAlive)
}

View File

@@ -2,14 +2,29 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["error.go"],
srcs = [
"error.go",
"middleware.go",
"util.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server",
visibility = ["//visibility:public"],
deps = [
"@com_github_gorilla_mux//:go_default_library",
"@com_github_rs_cors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["error_test.go"],
srcs = [
"error_test.go",
"middleware_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,7 +1,6 @@
package server
import (
"errors"
"fmt"
"strings"
)
@@ -16,8 +15,7 @@ type DecodeError struct {
// NewDecodeError wraps an error (either the initial decoding error or another DecodeError).
// The current field that failed decoding must be passed in.
func NewDecodeError(err error, field string) *DecodeError {
var de *DecodeError
ok := errors.As(err, &de)
de, ok := err.(*DecodeError)
if ok {
return &DecodeError{path: append([]string{field}, de.path...), err: de.err}
}
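An illustrative nesting of the constructor above, assuming the standard errors import: wrapping an inner decode failure twice accumulates the field path, so the resulting error points at Message.Signature (the exact message format depends on DecodeError's Error method, which is outside this hunk):
// exampleDecodePath shows how nested NewDecodeError calls build up a field path.
func exampleDecodePath() error {
    inner := errors.New("hex string has wrong length")
    return NewDecodeError(NewDecodeError(inner, "Signature"), "Message")
}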

View File

@@ -1,31 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"options.go",
"server.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/httprest",
visibility = ["//visibility:public"],
deps = [
"//api/server/middleware:go_default_library",
"//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["server_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/beacon-chain/flags:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -1,5 +0,0 @@
package httprest
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "httprest")

View File

@@ -1,44 +0,0 @@
package httprest
import (
"time"
"net/http"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
)
// Option is an HTTP REST server functional parameter type.
type Option func(g *Server) error
// WithMiddlewares sets the list of middlewares to be applied on routes.
func WithMiddlewares(mw []middleware.Middleware) Option {
return func(g *Server) error {
g.cfg.middlewares = mw
return nil
}
}
// WithHTTPAddr sets the full address (host and port) of the server.
func WithHTTPAddr(addr string) Option {
return func(g *Server) error {
g.cfg.httpAddr = addr
return nil
}
}
// WithRouter sets the internal router of the server; this option is required.
func WithRouter(r *http.ServeMux) Option {
return func(g *Server) error {
g.cfg.router = r
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(duration time.Duration) Option {
return func(g *Server) error {
g.cfg.timeout = duration
return nil
}
}

View File

@@ -1,101 +0,0 @@
package httprest
import (
"context"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
"github.com/prysmaticlabs/prysm/v5/runtime"
)
var _ runtime.Service = (*Server)(nil)
// Config parameters for setting up the http-rest service.
type config struct {
httpAddr string
middlewares []middleware.Middleware
router http.Handler
timeout time.Duration
}
// Server serves HTTP traffic.
type Server struct {
cfg *config
server *http.Server
cancel context.CancelFunc
ctx context.Context
startFailure error
}
// New returns a new instance of the Server.
func New(ctx context.Context, opts ...Option) (*Server, error) {
g := &Server{
ctx: ctx,
cfg: &config{},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
return nil, errors.New("router option not configured")
}
var handler http.Handler
defaultReadHeaderTimeout := time.Second
handler = middleware.MiddlewareChain(g.cfg.router, g.cfg.middlewares)
if g.cfg.timeout > 0*time.Second {
defaultReadHeaderTimeout = g.cfg.timeout
handler = http.TimeoutHandler(handler, g.cfg.timeout, "request timed out")
}
g.server = &http.Server{
Addr: g.cfg.httpAddr,
Handler: handler,
ReadHeaderTimeout: defaultReadHeaderTimeout,
}
return g, nil
}
// Start the HTTP REST service.
func (g *Server) Start() {
g.ctx, g.cancel = context.WithCancel(g.ctx)
go func() {
log.WithField("address", g.cfg.httpAddr).Info("Starting HTTP server")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start HTTP server")
g.startFailure = err
return
}
}()
}
// Status of the HTTP server. Returns an error if this service is unhealthy.
func (g *Server) Status() error {
if g.startFailure != nil {
return g.startFailure
}
return nil
}
// Stop the HTTP server with a graceful shutdown.
func (g *Server) Stop() error {
if g.server != nil {
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
defer shutdownCancel()
if err := g.server.Shutdown(shutdownCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("Existing connections terminated")
} else {
log.WithError(err).Error("Failed to gracefully shut down server")
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
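A wiring sketch for the httprest package shown in this hunk; the route, address, and timeout are placeholders, and the helper name is illustrative:
// runExampleServer builds, starts, and returns a Server using the options above.
func runExampleServer(ctx context.Context) (*Server, error) {
    router := http.NewServeMux()
    router.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
    })
    srv, err := New(ctx,
        WithRouter(router),
        WithHTTPAddr("127.0.0.1:3500"),
        WithTimeout(10*time.Second),
    )
    if err != nil {
        return nil, err
    }
    srv.Start() // non-blocking; Stop performs the graceful shutdown
    return srv, nil
}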

View File

@@ -1,71 +0,0 @@
package httprest
import (
"context"
"flag"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func TestServer_StartStop(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
port := ctx.Int(flags.HTTPServerPort.Name)
portStr := fmt.Sprintf("%d", port) // Convert port to string
host := ctx.String(flags.HTTPServerHost.Name)
address := net.JoinHostPort(host, portStr)
handler := http.NewServeMux()
opts := []Option{
WithHTTPAddr(address),
WithRouter(handler),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting HTTP server")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
handler := http.NewServeMux()
port := ctx.Int(flags.HTTPServerPort.Name)
portStr := fmt.Sprintf("%d", port) // Convert port to string
host := ctx.String(flags.HTTPServerHost.Name)
address := net.JoinHostPort(host, portStr)
opts := []Option{
WithHTTPAddr(address),
WithRouter(handler),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
writer := httptest.NewRecorder()
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}

32
api/server/middleware.go Normal file
View File

@@ -0,0 +1,32 @@
package server
import (
"net/http"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
NormalizeQueryValues(query)
r.URL.RawQuery = query.Encode()
next.ServeHTTP(w, r)
})
}
// CorsHandler sets the CORS settings on API endpoints.
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
c := cors.New(cors.Options{
AllowedOrigins: allowOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler
}
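A sketch of attaching both middlewares above to a gorilla/mux router in this server package; the allowed origin and helper name are placeholders:
// exampleRouter wires CORS and query normalization onto a router.
func exampleRouter() http.Handler {
    r := mux.NewRouter()
    r.Use(CorsHandler([]string{"http://localhost:4200"}))
    // NormalizeQueryValuesHandler matches mux.MiddlewareFunc's signature.
    r.Use(NormalizeQueryValuesHandler)
    return r
}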

View File

@@ -1,26 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"middleware.go",
"util.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
visibility = ["//visibility:public"],
deps = ["@com_github_rs_cors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = [
"middleware_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,125 +0,0 @@
package middleware
import (
"fmt"
"net/http"
"strings"
"github.com/rs/cors"
)
type Middleware func(http.Handler) http.Handler
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
NormalizeQueryValues(query)
r.URL.RawQuery = query.Encode()
next.ServeHTTP(w, r)
})
}
// CorsHandler sets the CORS settings on API endpoints.
func CorsHandler(allowOrigins []string) Middleware {
c := cors.New(cors.Options{
AllowedOrigins: allowOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler
}
// ContentTypeHandler checks the request for an accepted media type, otherwise returning an http.StatusUnsupportedMediaType error.
func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// skip the content type check for GET requests
if r.Method == http.MethodGet {
next.ServeHTTP(w, r)
return
}
contentType := r.Header.Get("Content-Type")
if contentType == "" {
http.Error(w, "Content-Type header is missing", http.StatusUnsupportedMediaType)
return
}
accepted := false
for _, acceptedType := range acceptedMediaTypes {
if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
accepted = true
break
}
}
if !accepted {
http.Error(w, fmt.Sprintf("Unsupported media type: %s", contentType), http.StatusUnsupportedMediaType)
return
}
next.ServeHTTP(w, r)
})
}
}
// AcceptHeaderHandler checks whether the client's Accept header matches a media type the server can respond with.
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
acceptHeader := r.Header.Get("Accept")
// the header is optional, so skip the check if it is not provided
if acceptHeader == "" {
next.ServeHTTP(w, r)
return
}
accepted := false
acceptTypes := strings.Split(acceptHeader, ",")
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
for _, acceptType := range acceptTypes {
acceptType = strings.TrimSpace(acceptType)
if acceptType == "*/*" {
accepted = true
break
}
for _, serverAcceptedType := range serverAcceptedTypes {
if strings.HasPrefix(acceptType, serverAcceptedType) {
accepted = true
break
}
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
accepted = true
break
}
}
if accepted {
break
}
}
if !accepted {
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
})
}
}
func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
if len(mw) < 1 {
return h
}
wrapped := h
for i := len(mw) - 1; i >= 0; i-- {
wrapped = mw[i](wrapped)
}
return wrapped
}
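A sketch of composing the handlers above with MiddlewareChain; mw[0] ends up as the outermost wrapper, and the media types simply mirror the tests below:
// exampleChain wraps a handler with CORS, Content-Type, and Accept checks.
func exampleChain(h http.Handler) http.Handler {
    return MiddlewareChain(h, []Middleware{
        CorsHandler([]string{"*"}),
        ContentTypeHandler([]string{"application/json", "application/octet-stream"}),
        AcceptHeaderHandler([]string{"application/json", "application/octet-stream"}),
    })
}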

View File

@@ -1,204 +0,0 @@
package middleware
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := NormalizeQueryValuesHandler(nextHandler)
tests := []struct {
name string
inputQuery string
expectedQuery string
}{
{
name: "3 values",
inputQuery: "key=value1,value2,value3",
expectedQuery: "key=value1&key=value2&key=value3", // the expected normalized query
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if rr.Code != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
}
if req.URL.RawQuery != test.expectedQuery {
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
}
if rr.Body.String() != "next handler" {
t.Errorf("next handler was not executed")
}
})
}
}
func TestContentTypeHandler(t *testing.T) {
acceptedMediaTypes := []string{api.JsonMediaType, api.OctetStreamMediaType}
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := ContentTypeHandler(acceptedMediaTypes)(nextHandler)
tests := []struct {
name string
contentType string
expectedStatusCode int
isGet bool
}{
{
name: "Accepted Content-Type - application/json",
contentType: api.JsonMediaType,
expectedStatusCode: http.StatusOK,
},
{
name: "Accepted Content-Type - ssz format",
contentType: api.OctetStreamMediaType,
expectedStatusCode: http.StatusOK,
},
{
name: "Unsupported Content-Type - text/plain",
contentType: "text/plain",
expectedStatusCode: http.StatusUnsupportedMediaType,
},
{
name: "Missing Content-Type",
contentType: "",
expectedStatusCode: http.StatusUnsupportedMediaType,
},
{
name: "GET request skips content type check",
contentType: "",
expectedStatusCode: http.StatusOK,
isGet: true,
},
{
name: "Content type contains charset is ok",
contentType: "application/json; charset=utf-8",
expectedStatusCode: http.StatusOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
httpMethod := http.MethodPost
if tt.isGet {
httpMethod = http.MethodGet
}
req := httptest.NewRequest(httpMethod, "/", nil)
if tt.contentType != "" {
req.Header.Set("Content-Type", tt.contentType)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if status := rr.Code; status != tt.expectedStatusCode {
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
}
})
}
}
func TestAcceptHeaderHandler(t *testing.T) {
acceptedTypes := []string{"application/json", "application/octet-stream"}
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := AcceptHeaderHandler(acceptedTypes)(nextHandler)
tests := []struct {
name string
acceptHeader string
expectedStatusCode int
}{
{
name: "Accepted Accept-Type - application/json",
acceptHeader: "application/json",
expectedStatusCode: http.StatusOK,
},
{
name: "Accepted Accept-Type - application/octet-stream",
acceptHeader: "application/octet-stream",
expectedStatusCode: http.StatusOK,
},
{
name: "Accepted Accept-Type with parameters",
acceptHeader: "application/json;q=0.9, application/octet-stream;q=0.8",
expectedStatusCode: http.StatusOK,
},
{
name: "Unsupported Accept-Type - text/plain",
acceptHeader: "text/plain",
expectedStatusCode: http.StatusNotAcceptable,
},
{
name: "Missing Accept header",
acceptHeader: "",
expectedStatusCode: http.StatusOK,
},
{
name: "*/* is accepted",
acceptHeader: "*/*",
expectedStatusCode: http.StatusOK,
},
{
name: "application/* is accepted",
acceptHeader: "application/*",
expectedStatusCode: http.StatusOK,
},
{
name: "/* is unsupported",
acceptHeader: "/*",
expectedStatusCode: http.StatusNotAcceptable,
},
{
name: "application/ is unsupported",
acceptHeader: "application/",
expectedStatusCode: http.StatusNotAcceptable,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
if tt.acceptHeader != "" {
req.Header.Set("Accept", tt.acceptHeader)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if status := rr.Code; status != tt.expectedStatusCode {
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
}
})
}
}

View File

@@ -0,0 +1,54 @@
package server
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := NormalizeQueryValuesHandler(nextHandler)
tests := []struct {
name string
inputQuery string
expectedQuery string
}{
{
name: "3 values",
inputQuery: "key=value1,value2,value3",
expectedQuery: "key=value1&key=value2&key=value3", // the expected normalized query
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if rr.Code != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
}
if req.URL.RawQuery != test.expectedQuery {
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
}
if rr.Body.String() != "next handler" {
t.Errorf("next handler was not executed")
}
})
}
}

View File

@@ -5,9 +5,7 @@ go_library(
srcs = [
"block.go",
"conversions.go",
"conversions_blob.go",
"conversions_block.go",
"conversions_lightclient.go",
"conversions_state.go",
"endpoints_beacon.go",
"endpoints_blob.go",
@@ -28,7 +26,6 @@ go_library(
"//api/server:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
@@ -36,9 +33,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -14,10 +14,6 @@ type SignedMessageJsoner interface {
SigString() string
}
// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------
type SignedBeaconBlock struct {
Message *BeaconBlock `json:"message"`
Signature string `json:"signature"`
@@ -52,29 +48,6 @@ type BeaconBlockBody struct {
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
Canonical bool `json:"canonical"`
}
type SignedBeaconBlockHeader struct {
Message *BeaconBlockHeader `json:"message"`
Signature string `json:"signature"`
}
type BeaconBlockHeader struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
BodyRoot string `json:"body_root"`
}
// ----------------------------------------------------------------------------
// Altair
// ----------------------------------------------------------------------------
type SignedBeaconBlockAltair struct {
Message *BeaconBlockAltair `json:"message"`
Signature string `json:"signature"`
@@ -110,10 +83,6 @@ type BeaconBlockBodyAltair struct {
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
}
// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------
type SignedBeaconBlockBellatrix struct {
Message *BeaconBlockBellatrix `json:"message"`
Signature string `json:"signature"`
@@ -186,44 +155,6 @@ type BlindedBeaconBlockBodyBellatrix struct {
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
}
type ExecutionPayload struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
}
type ExecutionPayloadHeader struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
type SignedBeaconBlockCapella struct {
Message *BeaconBlockCapella `json:"message"`
Signature string `json:"signature"`
@@ -298,46 +229,6 @@ type BlindedBeaconBlockBodyCapella struct {
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
}
type ExecutionPayloadCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
type SignedBeaconBlockContentsDeneb struct {
SignedBlock *SignedBeaconBlockDeneb `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
@@ -426,6 +317,95 @@ type BlindedBeaconBlockBodyDeneb struct {
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
Canonical bool `json:"canonical"`
}
type SignedBeaconBlockHeader struct {
Message *BeaconBlockHeader `json:"message"`
Signature string `json:"signature"`
}
type BeaconBlockHeader struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
BodyRoot string `json:"body_root"`
}
type ExecutionPayload struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
}
type ExecutionPayloadHeader struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
}
type ExecutionPayloadCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
}
type ExecutionPayloadDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
@@ -465,159 +445,3 @@ type ExecutionPayloadHeaderDeneb struct {
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
type SignedBeaconBlockContentsElectra struct {
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type BeaconBlockContentsElectra struct {
Block *BeaconBlockElectra `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type SignedBeaconBlockElectra struct {
Message *BeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
func (s *SignedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyElectra `json:"body"`
}
type BeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type BlindedBeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyElectra `json:"body"`
}
type SignedBlindedBeaconBlockElectra struct {
Message *BlindedBeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockElectra{}
func (s *SignedBlindedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BlindedBeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type (
ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}
)
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
type SignedBeaconBlockContentsFulu struct {
SignedBlock *SignedBeaconBlockFulu `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type BeaconBlockContentsFulu struct {
Block *BeaconBlockElectra `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type SignedBeaconBlockFulu struct {
Message *BeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}
func (s *SignedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockFulu) SigString() string {
return s.Signature
}
type BlindedBeaconBlockFulu struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyElectra `json:"body"`
}
type SignedBlindedBeaconBlockFulu struct {
Message *BlindedBeaconBlockFulu `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockFulu{}
func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
return s.Signature
}
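A generic consumer sketch: every Signed* container above satisfies SignedMessageJsoner, so callers can split message and signature without knowing the fork; the helper name is illustrative:
// splitSigned returns the raw JSON message and the signature of any signed container.
func splitSigned(s SignedMessageJsoner) (messageJson []byte, signature string, err error) {
    messageJson, err = s.MessageRawJson()
    if err != nil {
        return nil, "", err
    }
    return messageJson, s.SigString(), nil
}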

View File

@@ -8,15 +8,14 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/container/slice"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -52,9 +51,6 @@ func HistoricalSummaryFromConsensus(s *eth.HistoricalSummary) *HistoricalSummary
}
func (s *SignedBLSToExecutionChange) ToConsensus() (*eth.SignedBLSToExecutionChange, error) {
if s.Message == nil {
return nil, server.NewDecodeError(errNilValue, "Message")
}
change, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
@@ -106,17 +102,14 @@ func SignedBLSChangeFromConsensus(ch *eth.SignedBLSToExecutionChange) *SignedBLS
func SignedBLSChangesToConsensus(src []*SignedBLSToExecutionChange) ([]*eth.SignedBLSToExecutionChange, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "SignedBLSToExecutionChanges")
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 16)
if err != nil {
return nil, server.NewDecodeError(err, "SignedBLSToExecutionChanges")
return nil, err
}
changes := make([]*eth.SignedBLSToExecutionChange, len(src))
for i, ch := range src {
if ch == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
changes[i], err = ch.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
@@ -162,9 +155,6 @@ func ForkFromConsensus(f *eth.Fork) *Fork {
}
func (s *SignedValidatorRegistration) ToConsensus() (*eth.SignedValidatorRegistrationV1, error) {
if s.Message == nil {
return nil, server.NewDecodeError(errNilValue, "Message")
}
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
@@ -221,9 +211,6 @@ func SignedValidatorRegistrationFromConsensus(vr *eth.SignedValidatorRegistratio
}
func (s *SignedContributionAndProof) ToConsensus() (*eth.SignedContributionAndProof, error) {
if s.Message == nil {
return nil, server.NewDecodeError(errNilValue, "Message")
}
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
@@ -248,9 +235,6 @@ func SignedContributionAndProofFromConsensus(c *eth.SignedContributionAndProof)
}
func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error) {
if c.Contribution == nil {
return nil, server.NewDecodeError(errNilValue, "Contribution")
}
contribution, err := c.Contribution.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Contribution")
@@ -259,7 +243,7 @@ func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error)
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, fieldparams.BLSSignatureLength)
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, 96)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
@@ -322,9 +306,6 @@ func SyncCommitteeContributionFromConsensus(c *eth.SyncCommitteeContribution) *S
}
func (s *SignedAggregateAttestationAndProof) ToConsensus() (*eth.SignedAggregateAttestationAndProof, error) {
if s.Message == nil {
return nil, server.NewDecodeError(errNilValue, "Message")
}
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
@@ -345,14 +326,11 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
if a.Aggregate == nil {
return nil, server.NewDecodeError(errNilValue, "Aggregate")
}
agg, err := a.Aggregate.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
@@ -363,56 +341,11 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
}, nil
}
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
if s.Message == nil {
return nil, server.NewDecodeError(errNilValue, "Message")
}
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
sig, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedAggregateAttestationAndProofElectra{
Message: msg,
Signature: sig,
}, nil
}
func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttestationAndProofElectra, error) {
aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
if a.Aggregate == nil {
return nil, server.NewDecodeError(errNilValue, "Aggregate")
}
agg, err := a.Aggregate.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
return &eth.AggregateAttestationAndProofElectra{
AggregatorIndex: primitives.ValidatorIndex(aggIndex),
Aggregate: agg,
SelectionProof: proof,
}, nil
}
func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
return nil, server.NewDecodeError(err, "AggregationBits")
}
if a.Data == nil {
return nil, server.NewDecodeError(errNilValue, "Data")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
@@ -437,82 +370,6 @@ func AttFromConsensus(a *eth.Attestation) *Attestation {
}
}
func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
return nil, server.NewDecodeError(err, "AggregationBits")
}
if a.Data == nil {
return nil, server.NewDecodeError(errNilValue, "Data")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
committeeBits, err := hexutil.Decode(a.CommitteeBits)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeBits")
}
return &eth.AttestationElectra{
AggregationBits: aggBits,
Data: data,
Signature: sig,
CommitteeBits: committeeBits,
}, nil
}
func SingleAttFromConsensus(a *eth.SingleAttestation) *SingleAttestation {
return &SingleAttestation{
CommitteeIndex: fmt.Sprintf("%d", a.CommitteeId),
AttesterIndex: fmt.Sprintf("%d", a.AttesterIndex),
Data: AttDataFromConsensus(a.Data),
Signature: hexutil.Encode(a.Signature),
}
}
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeIndex")
}
ai, err := strconv.ParseUint(a.AttesterIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterIndex")
}
if a.Data == nil {
return nil, server.NewDecodeError(errNilValue, "Data")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SingleAttestation{
CommitteeId: primitives.CommitteeIndex(ci),
AttesterIndex: primitives.ValidatorIndex(ai),
Data: data,
Signature: sig,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),
Data: AttDataFromConsensus(a.Data),
Signature: hexutil.Encode(a.Signature),
CommitteeBits: hexutil.Encode(a.CommitteeBits),
}
}
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
slot, err := strconv.ParseUint(a.Slot, 10, 64)
if err != nil {
@@ -526,16 +383,10 @@ func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
if err != nil {
return nil, server.NewDecodeError(err, "BeaconBlockRoot")
}
if a.Source == nil {
return nil, server.NewDecodeError(errNilValue, "Source")
}
source, err := a.Source.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Source")
}
if a.Target == nil {
return nil, server.NewDecodeError(errNilValue, "Target")
}
target, err := a.Target.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Target")
@@ -635,17 +486,15 @@ func (b *BeaconCommitteeSubscription) ToConsensus() (*validator.BeaconCommitteeS
}
func (e *SignedVoluntaryExit) ToConsensus() (*eth.SignedVoluntaryExit, error) {
if e.Message == nil {
return nil, server.NewDecodeError(errNilValue, "Message")
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
exit, err := e.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedVoluntaryExit{
Exit: exit,
Signature: sig,
@@ -748,16 +597,10 @@ func Eth1DataFromConsensus(e1d *eth.Eth1Data) *Eth1Data {
}
func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
if s.SignedHeader1 == nil {
return nil, server.NewDecodeError(errNilValue, "SignedHeader1")
}
h1, err := s.SignedHeader1.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "SignedHeader1")
}
if s.SignedHeader2 == nil {
return nil, server.NewDecodeError(errNilValue, "SignedHeader2")
}
h2, err := s.SignedHeader2.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "SignedHeader2")
@@ -770,16 +613,10 @@ func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
}
func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, "Attestation1")
}
att1, err := s.Attestation1.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation1")
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, "Attestation2")
}
att2, err := s.Attestation2.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation2")
@@ -787,29 +624,7 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
}
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, "Attestation1")
}
att1, err := s.Attestation1.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation1")
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, "Attestation2")
}
att2, err := s.Attestation2.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation2")
}
return &eth.AttesterSlashingElectra{Attestation_1: att1, Attestation_2: att2}, nil
}
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
if err := slice.VerifyMaxLength(a.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee); err != nil {
return nil, err
}
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
@@ -818,9 +633,6 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
}
}
if a.Data == nil {
return nil, server.NewDecodeError(errNilValue, "Data")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
@@ -837,41 +649,6 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
}, nil
}
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
if err := slice.VerifyMaxLength(
a.AttestingIndices,
params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot,
); err != nil {
return nil, err
}
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
indices[i], err = strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
}
}
if a.Data == nil {
return nil, server.NewDecodeError(errNilValue, "Data")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.IndexedAttestationElectra{
AttestingIndices: indices,
Data: data,
Signature: sig,
}, nil
}
func WithdrawalsFromConsensus(ws []*enginev1.Withdrawal) []*Withdrawal {
result := make([]*Withdrawal, len(ws))
for i, w := range ws {
@@ -889,133 +666,13 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
}
}
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
result := make([]*WithdrawalRequest, len(ws))
for i, w := range ws {
result[i] = WithdrawalRequestFromConsensus(w)
}
return result
}
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
return &WithdrawalRequest{
SourceAddress: hexutil.Encode(w.SourceAddress),
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
Amount: fmt.Sprintf("%d", w.Amount),
}
}
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorPubkey")
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
return &enginev1.WithdrawalRequest{
SourceAddress: src,
ValidatorPubkey: pubkey,
Amount: amount,
}, nil
}
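An illustrative round trip for the request conversion above, assumed to sit alongside the conversions (the package name is not shown in this hunk) with a strings import; the hex values are zero-filled placeholders of the lengths the decoder enforces (20-byte address, 48-byte BLS pubkey):
// exampleWithdrawalRequestRoundTrip converts a JSON-style request to its
// consensus form and back, returning any decode error.
func exampleWithdrawalRequestRoundTrip() error {
    req := &WithdrawalRequest{
        SourceAddress:   "0x" + strings.Repeat("00", 20),
        ValidatorPubkey: "0x" + strings.Repeat("00", 48),
        Amount:          "32000000000",
    }
    consensus, err := req.ToConsensus()
    if err != nil {
        return err
    }
    _ = WithdrawalRequestFromConsensus(consensus) // back to the JSON-friendly form
    return nil
}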
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
result := make([]*ConsolidationRequest, len(cs))
for i, c := range cs {
result[i] = ConsolidationRequestFromConsensus(c)
}
return result
}
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
return &ConsolidationRequest{
SourceAddress: hexutil.Encode(c.SourceAddress),
SourcePubkey: hexutil.Encode(c.SourcePubkey),
TargetPubkey: hexutil.Encode(c.TargetPubkey),
}
}
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourcePubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "TargetPubkey")
}
return &enginev1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: srcPubkey,
TargetPubkey: targetPubkey,
}, nil
}
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
result := make([]*DepositRequest, len(ds))
for i, d := range ds {
result[i] = DepositRequestFromConsensus(d)
}
return result
}
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
return &DepositRequest{
Pubkey: hexutil.Encode(d.Pubkey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Index: fmt.Sprintf("%d", d.Index),
}
}
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "Pubkey")
}
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
}
amount, err := strconv.ParseUint(d.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
index, err := strconv.ParseUint(d.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
return &enginev1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: sig,
Index: index,
}, nil
}
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "ProposerSlashings")
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 16)
if err != nil {
return nil, server.NewDecodeError(err, "ProposerSlashings")
return nil, err
}
proposerSlashings := make([]*eth.ProposerSlashing, len(src))
for i, s := range src {
@@ -1144,11 +801,11 @@ func ProposerSlashingFromConsensus(src *eth.ProposerSlashing) *ProposerSlashing
func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "AttesterSlashings")
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 2)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterSlashings")
return nil, err
}
attesterSlashings := make([]*eth.AttesterSlashing, len(src))
@@ -1159,19 +816,10 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
}
if s.Attestation1.Data == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
}
if s.Attestation2.Data == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
}
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
@@ -1188,7 +836,6 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
}
a1AttestingIndices[j] = attestingIndex
}
a1Data, err := s.Attestation1.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
@@ -1284,162 +931,17 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
}
}
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "AttesterSlashingsElectra")
}
err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterSlashingsElectra")
}
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
for i, s := range src {
if s == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
}
if s.Attestation1.Data == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
}
if s.Attestation2.Data == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
}
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
}
a1AttestingIndices := make([]uint64, len(s.Attestation1.AttestingIndices))
for j, ix := range s.Attestation1.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices[%d]", i, j))
}
a1AttestingIndices[j] = attestingIndex
}
a1Data, err := s.Attestation1.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
}
a2Sig, err := bytesutil.DecodeHexWithLength(s.Attestation2.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
}
a2AttestingIndices := make([]uint64, len(s.Attestation2.AttestingIndices))
for j, ix := range s.Attestation2.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices[%d]", i, j))
}
a2AttestingIndices[j] = attestingIndex
}
a2Data, err := s.Attestation2.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Data", i))
}
attesterSlashings[i] = &eth.AttesterSlashingElectra{
Attestation_1: &eth.IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: a1Data,
Signature: a1Sig,
},
Attestation_2: &eth.IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: a2Data,
Signature: a2Sig,
},
}
}
return attesterSlashings, nil
}
func AttesterSlashingsElectraFromConsensus(src []*eth.AttesterSlashingElectra) []*AttesterSlashingElectra {
attesterSlashings := make([]*AttesterSlashingElectra, len(src))
for i, s := range src {
attesterSlashings[i] = AttesterSlashingElectraFromConsensus(s)
}
return attesterSlashings
}
func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *AttesterSlashingElectra {
a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
for j, ix := range src.Attestation_1.AttestingIndices {
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
for j, ix := range src.Attestation_2.AttestingIndices {
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
return &AttesterSlashingElectra{
Attestation1: &IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_1.Signature),
},
Attestation2: &IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_2.Signature),
},
}
}
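The attesting indices in these slashing payloads travel as decimal strings and are parsed back with strconv.ParseUint; below is a minimal, self-contained sketch of that conversion (parseIndices is a hypothetical helper, not part of this package).
package main
import (
	"fmt"
	"strconv"
)
// parseIndices converts decimal string indices, as carried by the JSON structs,
// into uint64 values, mirroring the per-element loop in the slashing converters.
func parseIndices(src []string) ([]uint64, error) {
	out := make([]uint64, len(src))
	for i, s := range src {
		v, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("index %d: %w", i, err)
		}
		out[i] = v
	}
	return out, nil
}
func main() {
	ixs, err := parseIndices([]string{"0", "42", "1024"})
	if err != nil {
		panic(err)
	}
	fmt.Println(ixs) // [0 42 1024]
}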
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "Attestations")
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 128)
if err != nil {
return nil, server.NewDecodeError(err, "Attestations")
return nil, err
}
atts := make([]*eth.Attestation, len(src))
for i, a := range src {
if a == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
atts[i], err = a.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
@@ -1456,43 +958,13 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
return atts
}
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "AttestationsElectra")
}
err := slice.VerifyMaxLength(src, 8)
if err != nil {
return nil, server.NewDecodeError(err, "AttestationsElectra")
}
atts := make([]*eth.AttestationElectra, len(src))
for i, a := range src {
if a == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
atts[i], err = a.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
}
}
return atts, nil
}
func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElectra {
atts := make([]*AttestationElectra, len(src))
for i, a := range src {
atts[i] = AttElectraFromConsensus(a)
}
return atts
}
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "Deposits")
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 16)
if err != nil {
return nil, server.NewDecodeError(err, "Deposits")
return nil, err
}
deposits := make([]*eth.Deposit, len(src))
@@ -1564,18 +1036,15 @@ func DepositsFromConsensus(src []*eth.Deposit) []*Deposit {
func SignedExitsToConsensus(src []*SignedVoluntaryExit) ([]*eth.SignedVoluntaryExit, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "SignedVoluntaryExits")
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 16)
if err != nil {
return nil, server.NewDecodeError(err, "SignedVoluntaryExits")
return nil, err
}
exits := make([]*eth.SignedVoluntaryExit, len(src))
for i, e := range src {
if e == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
exits[i], err = e.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
@@ -1619,81 +1088,3 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
ExecutionBlockHeight: fmt.Sprintf("%d", ds.ExecutionDepth),
}
}
func PendingDepositsFromConsensus(ds []*eth.PendingDeposit) []*PendingDeposit {
deposits := make([]*PendingDeposit, len(ds))
for i, d := range ds {
deposits[i] = &PendingDeposit{
Pubkey: hexutil.Encode(d.PublicKey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Slot: fmt.Sprintf("%d", d.Slot),
}
}
return deposits
}
func PendingPartialWithdrawalsFromConsensus(ws []*eth.PendingPartialWithdrawal) []*PendingPartialWithdrawal {
withdrawals := make([]*PendingPartialWithdrawal, len(ws))
for i, w := range ws {
withdrawals[i] = &PendingPartialWithdrawal{
Index: fmt.Sprintf("%d", w.Index),
Amount: fmt.Sprintf("%d", w.Amount),
WithdrawableEpoch: fmt.Sprintf("%d", w.WithdrawableEpoch),
}
}
return withdrawals
}
func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*PendingConsolidation {
consolidations := make([]*PendingConsolidation, len(cs))
for i, c := range cs {
consolidations[i] = &PendingConsolidation{
SourceIndex: fmt.Sprintf("%d", c.SourceIndex),
TargetIndex: fmt.Sprintf("%d", c.TargetIndex),
}
}
return consolidations
}
func HeadEventFromV1(event *ethv1.EventHead) *HeadEvent {
return &HeadEvent{
Slot: fmt.Sprintf("%d", event.Slot),
Block: hexutil.Encode(event.Block),
State: hexutil.Encode(event.State),
EpochTransition: event.EpochTransition,
ExecutionOptimistic: event.ExecutionOptimistic,
PreviousDutyDependentRoot: hexutil.Encode(event.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(event.CurrentDutyDependentRoot),
}
}
func FinalizedCheckpointEventFromV1(event *ethv1.EventFinalizedCheckpoint) *FinalizedCheckpointEvent {
return &FinalizedCheckpointEvent{
Block: hexutil.Encode(event.Block),
State: hexutil.Encode(event.State),
Epoch: fmt.Sprintf("%d", event.Epoch),
ExecutionOptimistic: event.ExecutionOptimistic,
}
}
func EventChainReorgFromV1(event *ethv1.EventChainReorg) *ChainReorgEvent {
return &ChainReorgEvent{
Slot: fmt.Sprintf("%d", event.Slot),
Depth: fmt.Sprintf("%d", event.Depth),
OldHeadBlock: hexutil.Encode(event.OldHeadBlock),
NewHeadBlock: hexutil.Encode(event.NewHeadBlock),
OldHeadState: hexutil.Encode(event.OldHeadState),
NewHeadState: hexutil.Encode(event.NewHeadState),
Epoch: fmt.Sprintf("%d", event.Epoch),
ExecutionOptimistic: event.ExecutionOptimistic,
}
}
func SyncAggregateFromConsensus(sa *eth.SyncAggregate) *SyncAggregate {
return &SyncAggregate{
SyncCommitteeBits: hexutil.Encode(sa.SyncCommitteeBits),
SyncCommitteeSignature: hexutil.Encode(sa.SyncCommitteeSignature),
}
}

View File

@@ -1,61 +0,0 @@
package structs
import (
"strconv"
"github.com/prysmaticlabs/prysm/v5/api/server"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func (sc *Sidecar) ToConsensus() (*eth.BlobSidecar, error) {
if sc == nil {
return nil, errNilValue
}
index, err := strconv.ParseUint(sc.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
blob, err := bytesutil.DecodeHexWithLength(sc.Blob, 131072)
if err != nil {
return nil, server.NewDecodeError(err, "Blob")
}
kzgCommitment, err := bytesutil.DecodeHexWithLength(sc.KzgCommitment, 48)
if err != nil {
return nil, server.NewDecodeError(err, "KzgCommitment")
}
kzgProof, err := bytesutil.DecodeHexWithLength(sc.KzgProof, 48)
if err != nil {
return nil, server.NewDecodeError(err, "KzgProof")
}
header, err := sc.SignedBeaconBlockHeader.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "SignedBeaconBlockHeader")
}
// decode the commitment inclusion proof
var commitmentInclusionProof [][]byte
for _, proof := range sc.CommitmentInclusionProof {
proofBytes, err := bytesutil.DecodeHexWithLength(proof, 32)
if err != nil {
return nil, server.NewDecodeError(err, "CommitmentInclusionProof")
}
commitmentInclusionProof = append(commitmentInclusionProof, proofBytes)
}
bsc := &eth.BlobSidecar{
Index: index,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
SignedBlockHeader: header,
CommitmentInclusionProof: commitmentInclusionProof,
}
return bsc, nil
}
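The DecodeHexWithLength helper used above pairs hex decoding with an exact length check; here is a rough stand-in built on go-ethereum's hexutil (illustrative only, not the Prysm implementation).
package main
import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)
// decodeHexWithLength decodes a 0x-prefixed hex string and enforces an exact
// byte length, approximating the behaviour the sidecar converter relies on.
func decodeHexWithLength(s string, length int) ([]byte, error) {
	b, err := hexutil.Decode(s)
	if err != nil {
		return nil, err
	}
	if len(b) != length {
		return nil, fmt.Errorf("expected %d bytes, got %d", length, len(b))
	}
	return b, nil
}
func main() {
	root, err := decodeHexWithLength("0x"+fmt.Sprintf("%064x", 1), 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(root)) // 32
}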

File diff suppressed because it is too large


View File

@@ -1,229 +0,0 @@
package structs
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
func LightClientUpdateFromConsensus(update interfaces.LightClientUpdate) (*LightClientUpdate, error) {
attestedHeader, err := lightClientHeaderToJSON(update.AttestedHeader())
if err != nil {
return nil, errors.Wrap(err, "could not marshal attested light client header")
}
finalizedHeader, err := lightClientHeaderToJSON(update.FinalizedHeader())
if err != nil {
return nil, errors.Wrap(err, "could not marshal finalized light client header")
}
var scBranch [][32]byte
var finalityBranch [][32]byte
if update.Version() >= version.Electra {
scb, err := update.NextSyncCommitteeBranchElectra()
if err != nil {
return nil, err
}
scBranch = scb[:]
fb, err := update.FinalityBranchElectra()
if err != nil {
return nil, err
}
finalityBranch = fb[:]
} else {
scb, err := update.NextSyncCommitteeBranch()
if err != nil {
return nil, err
}
scBranch = scb[:]
fb, err := update.FinalityBranch()
if err != nil {
return nil, err
}
finalityBranch = fb[:]
}
return &LightClientUpdate{
AttestedHeader: attestedHeader,
NextSyncCommittee: SyncCommitteeFromConsensus(update.NextSyncCommittee()),
NextSyncCommitteeBranch: branchToJSON(scBranch),
FinalizedHeader: finalizedHeader,
FinalityBranch: branchToJSON(finalityBranch),
SyncAggregate: SyncAggregateFromConsensus(update.SyncAggregate()),
SignatureSlot: fmt.Sprintf("%d", update.SignatureSlot()),
}, nil
}
func LightClientFinalityUpdateFromConsensus(update interfaces.LightClientFinalityUpdate) (*LightClientFinalityUpdate, error) {
attestedHeader, err := lightClientHeaderToJSON(update.AttestedHeader())
if err != nil {
return nil, errors.Wrap(err, "could not marshal attested light client header")
}
finalizedHeader, err := lightClientHeaderToJSON(update.FinalizedHeader())
if err != nil {
return nil, errors.Wrap(err, "could not marshal finalized light client header")
}
var finalityBranch [][32]byte
if update.Version() >= version.Electra {
b, err := update.FinalityBranchElectra()
if err != nil {
return nil, err
}
finalityBranch = b[:]
} else {
b, err := update.FinalityBranch()
if err != nil {
return nil, err
}
finalityBranch = b[:]
}
return &LightClientFinalityUpdate{
AttestedHeader: attestedHeader,
FinalizedHeader: finalizedHeader,
FinalityBranch: branchToJSON(finalityBranch),
SyncAggregate: SyncAggregateFromConsensus(update.SyncAggregate()),
SignatureSlot: fmt.Sprintf("%d", update.SignatureSlot()),
}, nil
}
func LightClientOptimisticUpdateFromConsensus(update interfaces.LightClientOptimisticUpdate) (*LightClientOptimisticUpdate, error) {
attestedHeader, err := lightClientHeaderToJSON(update.AttestedHeader())
if err != nil {
return nil, errors.Wrap(err, "could not marshal attested light client header")
}
return &LightClientOptimisticUpdate{
AttestedHeader: attestedHeader,
SyncAggregate: SyncAggregateFromConsensus(update.SyncAggregate()),
SignatureSlot: fmt.Sprintf("%d", update.SignatureSlot()),
}, nil
}
func branchToJSON[S [][32]byte](branchBytes S) []string {
if branchBytes == nil {
return nil
}
branch := make([]string, len(branchBytes))
for i, root := range branchBytes {
branch[i] = hexutil.Encode(root[:])
}
return branch
}
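For reference, a non-generic sketch of what branchToJSON produces for a two-element Merkle branch (the roots are arbitrary placeholders):
package main
import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)
// branchToHex is a non-generic version of branchToJSON: each 32-byte root in
// the branch becomes a 0x-prefixed hex string.
func branchToHex(branch [][32]byte) []string {
	if branch == nil {
		return nil
	}
	out := make([]string, len(branch))
	for i, root := range branch {
		out[i] = hexutil.Encode(root[:])
	}
	return out
}
func main() {
	var a, b [32]byte
	a[31], b[31] = 1, 2
	// Prints two 64-character hex strings ending in ...01 and ...02.
	fmt.Println(branchToHex([][32]byte{a, b}))
}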
func lightClientHeaderToJSON(header interfaces.LightClientHeader) (json.RawMessage, error) {
// A finalizedHeader may be nil; in that case no JSON is produced.
if header == nil {
return nil, nil
}
var result any
switch v := header.Version(); v {
case version.Altair:
result = &LightClientHeader{Beacon: BeaconBlockHeaderFromConsensus(header.Beacon())}
case version.Capella:
exInterface, err := header.Execution()
if err != nil {
return nil, err
}
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderCapella)
if !ok {
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderCapella{})
}
execution, err := ExecutionPayloadHeaderCapellaFromConsensus(ex)
if err != nil {
return nil, err
}
executionBranch, err := header.ExecutionBranch()
if err != nil {
return nil, err
}
result = &LightClientHeaderCapella{
Beacon: BeaconBlockHeaderFromConsensus(header.Beacon()),
Execution: execution,
ExecutionBranch: branchToJSON(executionBranch[:]),
}
case version.Deneb:
exInterface, err := header.Execution()
if err != nil {
return nil, err
}
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderDeneb{})
}
execution, err := ExecutionPayloadHeaderDenebFromConsensus(ex)
if err != nil {
return nil, err
}
executionBranch, err := header.ExecutionBranch()
if err != nil {
return nil, err
}
result = &LightClientHeaderDeneb{
Beacon: BeaconBlockHeaderFromConsensus(header.Beacon()),
Execution: execution,
ExecutionBranch: branchToJSON(executionBranch[:]),
}
case version.Electra:
exInterface, err := header.Execution()
if err != nil {
return nil, err
}
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderDeneb{})
}
execution, err := ExecutionPayloadHeaderElectraFromConsensus(ex)
if err != nil {
return nil, err
}
executionBranch, err := header.ExecutionBranch()
if err != nil {
return nil, err
}
result = &LightClientHeaderDeneb{
Beacon: BeaconBlockHeaderFromConsensus(header.Beacon()),
Execution: execution,
ExecutionBranch: branchToJSON(executionBranch[:]),
}
default:
return nil, fmt.Errorf("unsupported header version %s", version.String(v))
}
return json.Marshal(result)
}
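Because the header is emitted as json.RawMessage, callers are expected to pick the concrete struct from the response version before decoding. A hedged sketch follows; it assumes the structs defined in this package, an encoding/json import, and the lowercase version strings used by the API.
// decodeHeader picks a concrete header struct from a version string and
// decodes the raw JSON into it; versions before Capella fall back to the
// Altair-style header. Illustrative only, not part of this package.
func decodeHeader(ver string, raw json.RawMessage) (any, error) {
	switch ver {
	case "capella":
		h := &LightClientHeaderCapella{}
		return h, json.Unmarshal(raw, h)
	case "deneb", "electra":
		h := &LightClientHeaderDeneb{}
		return h, json.Unmarshal(raw, h)
	default:
		h := &LightClientHeader{}
		return h, json.Unmarshal(raw, h)
	}
}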
func LightClientBootstrapFromConsensus(bootstrap interfaces.LightClientBootstrap) (*LightClientBootstrap, error) {
header, err := lightClientHeaderToJSON(bootstrap.Header())
if err != nil {
return nil, errors.Wrap(err, "could not marshal light client header")
}
var scBranch [][32]byte
if bootstrap.Version() >= version.Electra {
b, err := bootstrap.CurrentSyncCommitteeBranchElectra()
if err != nil {
return nil, err
}
scBranch = b[:]
} else {
b, err := bootstrap.CurrentSyncCommitteeBranch()
if err != nil {
return nil, err
}
scBranch = b[:]
}
return &LightClientBootstrap{
Header: header,
CurrentSyncCommittee: SyncCommitteeFromConsensus(bootstrap.CurrentSyncCommittee()),
CurrentSyncCommitteeBranch: branchToJSON(scBranch),
}, nil
}

View File

@@ -11,10 +11,6 @@ import (
var errPayloadHeaderNotFound = errors.New("expected payload header not found")
// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------
func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -101,10 +97,6 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
}, nil
}
// ----------------------------------------------------------------------------
// Altair
// ----------------------------------------------------------------------------
func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAltair, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -210,10 +202,6 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
}, nil
}
// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------
func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconStateBellatrix, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -332,10 +320,6 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
}, nil
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCapella, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -473,10 +457,6 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
}, nil
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDeneb, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -613,375 +593,3 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
HistoricalSummaries: hs,
}, nil
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr, err := st.HistoricalRoots()
if err != nil {
return nil, err
}
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
execData, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, errPayloadHeaderNotFound
}
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
return &BeaconStateElectra{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
LatestExecutionPayloadHeader: payload,
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}
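The latest execution payload header is read through an interface and narrowed with a checked type assertion, falling back to errPayloadHeaderNotFound when the concrete proto type does not match. Below is a self-contained sketch of that pattern with stand-in types; all names are illustrative only.
package main
import (
	"errors"
	"fmt"
)
// Stand-ins for the interfaces used by the state converters.
type protoHolder interface{ Proto() any }
type denebHeader struct{ BlockHash string }
type execData struct{ header *denebHeader }
func (e execData) Proto() any { return e.header }
var errPayloadHeaderNotFound = errors.New("expected payload header not found")
// headerFrom narrows the proto message with a checked type assertion, the same
// pattern the Electra and Fulu converters use before building the JSON payload.
func headerFrom(d protoHolder) (*denebHeader, error) {
	h, ok := d.Proto().(*denebHeader)
	if !ok {
		return nil, errPayloadHeaderNotFound
	}
	return h, nil
}
func main() {
	h, err := headerFrom(execData{header: &denebHeader{BlockHash: "0xabc"}})
	fmt.Println(h.BlockHash, err) // 0xabc <nil>
}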
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr, err := st.HistoricalRoots()
if err != nil {
return nil, err
}
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
execData, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, errPayloadHeaderNotFound
}
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
return &BeaconStateFulu{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
LatestExecutionPayloadHeader: payload,
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}

View File

@@ -24,96 +24,3 @@ func TestDepositSnapshotFromConsensus(t *testing.T) {
require.Equal(t, "0x1234", res.ExecutionBlockHash)
require.Equal(t, "67890", res.ExecutionBlockHeight)
}
func TestSignedBLSToExecutionChange_ToConsensus(t *testing.T) {
s := &SignedBLSToExecutionChange{Message: nil, Signature: ""}
_, err := s.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestSignedValidatorRegistration_ToConsensus(t *testing.T) {
s := &SignedValidatorRegistration{Message: nil, Signature: ""}
_, err := s.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestSignedContributionAndProof_ToConsensus(t *testing.T) {
s := &SignedContributionAndProof{Message: nil, Signature: ""}
_, err := s.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestContributionAndProof_ToConsensus(t *testing.T) {
c := &ContributionAndProof{
Contribution: nil,
AggregatorIndex: "invalid",
SelectionProof: "",
}
_, err := c.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestSignedAggregateAttestationAndProof_ToConsensus(t *testing.T) {
s := &SignedAggregateAttestationAndProof{Message: nil, Signature: ""}
_, err := s.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestAggregateAttestationAndProof_ToConsensus(t *testing.T) {
a := &AggregateAttestationAndProof{
AggregatorIndex: "1",
Aggregate: nil,
SelectionProof: "",
}
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestAttestation_ToConsensus(t *testing.T) {
a := &Attestation{
AggregationBits: "0x10",
Data: nil,
Signature: "",
}
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestSingleAttestation_ToConsensus(t *testing.T) {
s := &SingleAttestation{
CommitteeIndex: "1",
AttesterIndex: "1",
Data: nil,
Signature: "",
}
_, err := s.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestSignedVoluntaryExit_ToConsensus(t *testing.T) {
s := &SignedVoluntaryExit{Message: nil, Signature: ""}
_, err := s.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestProposerSlashing_ToConsensus(t *testing.T) {
p := &ProposerSlashing{SignedHeader1: nil, SignedHeader2: nil}
_, err := p.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestAttesterSlashing_ToConsensus(t *testing.T) {
a := &AttesterSlashing{Attestation1: nil, Attestation2: nil}
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestIndexedAttestation_ToConsensus(t *testing.T) {
a := &IndexedAttestation{
AttestingIndices: []string{"1"},
Data: nil,
Signature: "invalid",
}
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}

View File

@@ -21,12 +21,11 @@ type GetCommitteesResponse struct {
}
type ListAttestationsResponse struct {
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
Data []*Attestation `json:"data"`
}
type SubmitAttestationsRequest struct {
Data json.RawMessage `json:"data"`
Data []*Attestation `json:"data"`
}
type ListVoluntaryExitsResponse struct {
@@ -134,13 +133,6 @@ type GetBlockAttestationsResponse struct {
Data []*Attestation `json:"data"`
}
type GetBlockAttestationsV2Response struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data json.RawMessage `json:"data"` // Accepts both `Attestation` and `AttestationElectra` types
}
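Since Data is a json.RawMessage here, consumers switch on Version to choose the concrete attestation type. A hedged sketch, assuming the structs in this package and an encoding/json import (the real handlers live elsewhere):
// decodeBlockAtts chooses the concrete attestation slice based on the response
// version. Illustrative only.
func decodeBlockAtts(resp *GetBlockAttestationsV2Response) (any, error) {
	if resp.Version == "electra" {
		var atts []*AttestationElectra
		if err := json.Unmarshal(resp.Data, &atts); err != nil {
			return nil, err
		}
		return atts, nil
	}
	var atts []*Attestation
	if err := json.Unmarshal(resp.Data, &atts); err != nil {
		return nil, err
	}
	return atts, nil
}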
type GetStateRootResponse struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
@@ -177,8 +169,7 @@ type BLSToExecutionChangesPoolResponse struct {
}
type GetAttesterSlashingsResponse struct {
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"` // Accepts both `[]*AttesterSlashing` and `[]*AttesterSlashingElectra` types
Data []*AttesterSlashing `json:"data"`
}
type GetProposerSlashingsResponse struct {
@@ -205,62 +196,3 @@ type DepositSnapshot struct {
ExecutionBlockHash string `json:"execution_block_hash"`
ExecutionBlockHeight string `json:"execution_block_height"`
}
type GetIndividualVotesRequest struct {
Epoch string `json:"epoch"`
PublicKeys []string `json:"public_keys,omitempty"`
Indices []string `json:"indices,omitempty"`
}
type GetIndividualVotesResponse struct {
IndividualVotes []*IndividualVote `json:"individual_votes"`
}
type IndividualVote struct {
Epoch string `json:"epoch"`
PublicKey string `json:"public_keys,omitempty"`
ValidatorIndex string `json:"validator_index"`
IsSlashed bool `json:"is_slashed"`
IsWithdrawableInCurrentEpoch bool `json:"is_withdrawable_in_current_epoch"`
IsActiveInCurrentEpoch bool `json:"is_active_in_current_epoch"`
IsActiveInPreviousEpoch bool `json:"is_active_in_previous_epoch"`
IsCurrentEpochAttester bool `json:"is_current_epoch_attester"`
IsCurrentEpochTargetAttester bool `json:"is_current_epoch_target_attester"`
IsPreviousEpochAttester bool `json:"is_previous_epoch_attester"`
IsPreviousEpochTargetAttester bool `json:"is_previous_epoch_target_attester"`
IsPreviousEpochHeadAttester bool `json:"is_previous_epoch_head_attester"`
CurrentEpochEffectiveBalanceGwei string `json:"current_epoch_effective_balance_gwei"`
InclusionSlot string `json:"inclusion_slot"`
InclusionDistance string `json:"inclusion_distance"`
InactivityScore string `json:"inactivity_score"`
}
type ChainHead struct {
HeadSlot string `json:"head_slot"`
HeadEpoch string `json:"head_epoch"`
HeadBlockRoot string `json:"head_block_root"`
FinalizedSlot string `json:"finalized_slot"`
FinalizedEpoch string `json:"finalized_epoch"`
FinalizedBlockRoot string `json:"finalized_block_root"`
JustifiedSlot string `json:"justified_slot"`
JustifiedEpoch string `json:"justified_epoch"`
JustifiedBlockRoot string `json:"justified_block_root"`
PreviousJustifiedSlot string `json:"previous_justified_slot"`
PreviousJustifiedEpoch string `json:"previous_justified_epoch"`
PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
OptimisticStatus bool `json:"optimistic_status"`
}
type GetPendingDepositsResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*PendingDeposit `json:"data"`
}
type GetPendingPartialWithdrawalsResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*PendingPartialWithdrawal `json:"data"`
}

View File

@@ -1,10 +1,7 @@
package structs
type SidecarsResponse struct {
Version string `json:"version"`
Data []*Sidecar `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*Sidecar `json:"data"`
}
type Sidecar struct {
@@ -15,12 +12,3 @@ type Sidecar struct {
KzgProof string `json:"kzg_proof"`
CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
}
type BlobSidecars struct {
Sidecars []*Sidecar `json:"sidecars"`
}
type PublishBlobsRequest struct {
BlobSidecars *BlobSidecars `json:"blob_sidecars"`
BlockRoot string `json:"block_root"`
}

View File

@@ -96,7 +96,21 @@ type LightClientFinalityUpdateEvent struct {
Data *LightClientFinalityUpdate `json:"data"`
}
type LightClientFinalityUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
FinalizedHeader *BeaconBlockHeader `json:"finalized_header"`
FinalityBranch []string `json:"finality_branch"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientOptimisticUpdateEvent struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type LightClientOptimisticUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}

View File

@@ -1,73 +1,31 @@
package structs
import "encoding/json"
type LightClientHeader struct {
Beacon *BeaconBlockHeader `json:"beacon"`
}
type LightClientHeaderCapella struct {
Beacon *BeaconBlockHeader `json:"beacon"`
Execution *ExecutionPayloadHeaderCapella `json:"execution"`
ExecutionBranch []string `json:"execution_branch"`
}
type LightClientHeaderDeneb struct {
Beacon *BeaconBlockHeader `json:"beacon"`
Execution *ExecutionPayloadHeaderDeneb `json:"execution"`
ExecutionBranch []string `json:"execution_branch"`
}
type LightClientBootstrap struct {
Header json.RawMessage `json:"header"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}
type LightClientUpdate struct {
AttestedHeader json.RawMessage `json:"attested_header"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
FinalizedHeader json.RawMessage `json:"finalized_header,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
FinalityBranch []string `json:"finality_branch,omitempty"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientFinalityUpdate struct {
AttestedHeader json.RawMessage `json:"attested_header"`
FinalizedHeader json.RawMessage `json:"finalized_header"`
FinalityBranch []string `json:"finality_branch"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientOptimisticUpdate struct {
AttestedHeader json.RawMessage `json:"attested_header"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientBootstrapResponse struct {
Version string `json:"version"`
Data *LightClientBootstrap `json:"data"`
}
type LightClientUpdateResponse struct {
type LightClientBootstrap struct {
Header *BeaconBlockHeader `json:"header"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}
type LightClientUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
FinalizedHeader *BeaconBlockHeader `json:"finalized_header,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
FinalityBranch []string `json:"finality_branch,omitempty"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientUpdateWithVersion struct {
Version string `json:"version"`
Data *LightClientUpdate `json:"data"`
}
type LightClientFinalityUpdateResponse struct {
Version string `json:"version"`
Data *LightClientFinalityUpdate `json:"data"`
}
type LightClientOptimisticUpdateResponse struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type LightClientUpdatesByRangeResponse struct {
Updates []*LightClientUpdateResponse `json:"updates"`
Updates []*LightClientUpdateWithVersion `json:"updates"`
}

View File

@@ -7,8 +7,7 @@ import (
)
type AggregateAttestationResponse struct {
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
Data *Attestation `json:"data"`
}
type SubmitContributionAndProofsRequest struct {
@@ -16,7 +15,7 @@ type SubmitContributionAndProofsRequest struct {
}
type SubmitAggregateAndProofsRequest struct {
Data []json.RawMessage `json:"data"`
Data []*SignedAggregateAttestationAndProof `json:"data"`
}
type SubmitSyncCommitteeSubscriptionsRequest struct {
@@ -119,34 +118,3 @@ type GetValidatorPerformanceResponse struct {
MissingValidators [][]byte `json:"missing_validators,omitempty"`
InactivityScores []uint64 `json:"inactivity_scores,omitempty"`
}
type GetValidatorParticipationResponse struct {
Epoch string `json:"epoch"`
Finalized bool `json:"finalized"`
Participation *ValidatorParticipation `json:"participation"`
}
type ValidatorParticipation struct {
GlobalParticipationRate string `json:"global_participation_rate" deprecated:"true"`
VotedEther string `json:"voted_ether" deprecated:"true"`
EligibleEther string `json:"eligible_ether" deprecated:"true"`
CurrentEpochActiveGwei string `json:"current_epoch_active_gwei"`
CurrentEpochAttestingGwei string `json:"current_epoch_attesting_gwei"`
CurrentEpochTargetAttestingGwei string `json:"current_epoch_target_attesting_gwei"`
PreviousEpochActiveGwei string `json:"previous_epoch_active_gwei"`
PreviousEpochAttestingGwei string `json:"previous_epoch_attesting_gwei"`
PreviousEpochTargetAttestingGwei string `json:"previous_epoch_target_attesting_gwei"`
PreviousEpochHeadAttestingGwei string `json:"previous_epoch_head_attesting_gwei"`
}
type ActiveSetChanges struct {
Epoch string `json:"epoch"`
ActivatedPublicKeys []string `json:"activated_public_keys"`
ActivatedIndices []string `json:"activated_indices"`
ExitedPublicKeys []string `json:"exited_public_keys"`
ExitedIndices []string `json:"exited_indices"`
SlashedPublicKeys []string `json:"slashed_public_keys"`
SlashedIndices []string `json:"slashed_indices"`
EjectedPublicKeys []string `json:"ejected_public_keys"`
EjectedIndices []string `json:"ejected_indices"`
}

View File

@@ -29,20 +29,6 @@ type Attestation struct {
Signature string `json:"signature"`
}
type AttestationElectra struct {
AggregationBits string `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
CommitteeBits string `json:"committee_bits"`
}
type SingleAttestation struct {
CommitteeIndex string `json:"committee_index"`
AttesterIndex string `json:"attester_index"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`
@@ -92,17 +78,6 @@ type AggregateAttestationAndProof struct {
SelectionProof string `json:"selection_proof"`
}
type SignedAggregateAttestationAndProofElectra struct {
Message *AggregateAttestationAndProofElectra `json:"message"`
Signature string `json:"signature"`
}
type AggregateAttestationAndProofElectra struct {
AggregatorIndex string `json:"aggregator_index"`
Aggregate *AttestationElectra `json:"aggregate"`
SelectionProof string `json:"selection_proof"`
}
type SyncCommitteeSubscription struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
@@ -203,11 +178,6 @@ type AttesterSlashing struct {
Attestation2 *IndexedAttestation `json:"attestation_2"`
}
type AttesterSlashingElectra struct {
Attestation1 *IndexedAttestationElectra `json:"attestation_1"`
Attestation2 *IndexedAttestationElectra `json:"attestation_2"`
}
type Deposit struct {
Proof []string `json:"proof"`
Data *DepositData `json:"data"`
@@ -226,12 +196,6 @@ type IndexedAttestation struct {
Signature string `json:"signature"`
}
type IndexedAttestationElectra struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type SyncAggregate struct {
SyncCommitteeBits string `json:"sync_committee_bits"`
SyncCommitteeSignature string `json:"sync_committee_signature"`
@@ -243,42 +207,3 @@ type Withdrawal struct {
ExecutionAddress string `json:"address"`
Amount string `json:"amount"`
}
type DepositRequest struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Index string `json:"index"`
}
type WithdrawalRequest struct {
SourceAddress string `json:"source_address"`
ValidatorPubkey string `json:"validator_pubkey"`
Amount string `json:"amount"`
}
type ConsolidationRequest struct {
SourceAddress string `json:"source_address"`
SourcePubkey string `json:"source_pubkey"`
TargetPubkey string `json:"target_pubkey"`
}
type PendingDeposit struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Slot string `json:"slot"`
}
type PendingPartialWithdrawal struct {
Index string `json:"index"`
Amount string `json:"amount"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}
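Marshalling one of these structs yields the snake_case, string-valued JSON the API exposes; a quick self-contained sketch with placeholder values:
package main
import (
	"encoding/json"
	"fmt"
)
// A local copy of the struct above, so the sketch is self-contained.
type PendingPartialWithdrawal struct {
	Index             string `json:"index"`
	Amount            string `json:"amount"`
	WithdrawableEpoch string `json:"withdrawable_epoch"`
}
func main() {
	w := &PendingPartialWithdrawal{Index: "7", Amount: "32000000000", WithdrawableEpoch: "1024"}
	b, err := json.Marshal(w)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"index":"7","amount":"32000000000","withdrawable_epoch":"1024"}
}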

View File

@@ -140,83 +140,3 @@ type BeaconStateDeneb struct {
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
}
type BeaconStateElectra struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}
type BeaconStateFulu struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}

View File

@@ -1,4 +1,4 @@
package middleware
package server
import (
"net/url"

View File

@@ -1,4 +1,4 @@
package middleware
package server
import (
"testing"

View File

@@ -4,25 +4,26 @@ go_library(
name = "go_default_library",
srcs = [
"feed.go",
"interface.go",
"subscription.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/async/event",
visibility = ["//visibility:public"],
deps = [
"//time/mclock:go_default_library",
"@com_github_ethereum_go_ethereum//event:go_default_library",
],
deps = ["//time/mclock:go_default_library"],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"example_feed_test.go",
"example_scope_test.go",
"example_subscription_test.go",
"feed_test.go",
"subscription_test.go",
],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -0,0 +1,73 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package event_test
import (
"fmt"
"github.com/prysmaticlabs/prysm/v5/async/event"
)
func ExampleFeed_acknowledgedEvents() {
// This example shows how the return value of Send can be used for request/reply
// interaction between event consumers and producers.
var feed event.Feed
type ackedEvent struct {
i int
ack chan<- struct{}
}
// Consumers wait for events on the feed and acknowledge processing.
done := make(chan struct{})
defer close(done)
for i := 0; i < 3; i++ {
ch := make(chan ackedEvent, 100)
sub := feed.Subscribe(ch)
go func() {
defer sub.Unsubscribe()
for {
select {
case ev := <-ch:
fmt.Println(ev.i) // "process" the event
ev.ack <- struct{}{}
case <-done:
return
}
}
}()
}
// The producer sends values of type ackedEvent with increasing values of i.
// It waits for all consumers to acknowledge before sending the next event.
for i := 0; i < 3; i++ {
acksignal := make(chan struct{})
n := feed.Send(ackedEvent{i, acksignal})
for ack := 0; ack < n; ack++ {
<-acksignal
}
}
// Output:
// 0
// 0
// 0
// 1
// 1
// 1
// 2
// 2
// 2
}
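For a plainer starting point than the acknowledged-events example, here is a minimal Subscribe/Send round trip using only the exported Feed API (a sketch; the output assumes a single buffered subscriber).
package main
import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/async/event"
)
func main() {
	var feed event.Feed
	ch := make(chan int, 1) // buffered so Send's fast path can deliver immediately
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	n := feed.Send(42) // returns the number of subscribers the value reached
	fmt.Println(n, <-ch)
	// Output: 1 42
}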

View File

@@ -14,12 +14,241 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package event contains an event feed implementation for process communication.
package event
import (
geth_event "github.com/ethereum/go-ethereum/event"
"errors"
"reflect"
"slices"
"sync"
)
// Feed and Subscription are re-exports of the go-ethereum event feed types.
type Feed = geth_event.Feed
type Subscription = geth_event.Subscription
var errBadChannel = errors.New("event: Subscribe argument does not have sendable channel type")
// Feed implements one-to-many subscriptions where the carrier of events is a channel.
// Values sent to a Feed are delivered to all subscribed channels simultaneously.
//
// Feeds can only be used with a single type. The type is determined by the first Send or
// Subscribe operation. Subsequent calls to these methods panic if the type does not
// match.
//
// The zero value is ready to use.
type Feed struct {
once sync.Once // ensures that init only runs once
sendLock chan struct{} // sendLock has a one-element buffer and is empty when held. It protects sendCases.
removeSub chan interface{} // interrupts Send
sendCases caseList // the active set of select cases used by Send
// The inbox holds newly subscribed channels until they are added to sendCases.
mu sync.Mutex
inbox caseList
etype reflect.Type
}
// This is the index of the first actual subscription channel in sendCases.
// sendCases[0] is a SelectRecv case for the removeSub channel.
const firstSubSendCase = 1
type feedTypeError struct {
got, want reflect.Type
op string
}
func (e feedTypeError) Error() string {
return "event: wrong type in " + e.op + " got " + e.got.String() + ", want " + e.want.String()
}
func (f *Feed) init() {
f.removeSub = make(chan interface{})
f.sendLock = make(chan struct{}, 1)
f.sendLock <- struct{}{}
f.sendCases = caseList{{Chan: reflect.ValueOf(f.removeSub), Dir: reflect.SelectRecv}}
}
// Subscribe adds a channel to the feed. Future sends will be delivered on the channel
// until the subscription is canceled. All channels added must have the same element type.
//
// The channel should have ample buffer space to avoid blocking other subscribers.
// Slow subscribers are not dropped.
func (f *Feed) Subscribe(channel interface{}) Subscription {
f.once.Do(f.init)
chanval := reflect.ValueOf(channel)
chantyp := chanval.Type()
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.SendDir == 0 {
panic(errBadChannel)
}
sub := &feedSub{feed: f, channel: chanval, err: make(chan error, 1)}
f.mu.Lock()
defer f.mu.Unlock()
if !f.typecheck(chantyp.Elem()) {
panic(feedTypeError{op: "Subscribe", got: chantyp, want: reflect.ChanOf(reflect.SendDir, f.etype)})
}
// Add the select case to the inbox.
// The next Send will add it to f.sendCases.
cas := reflect.SelectCase{Dir: reflect.SelectSend, Chan: chanval}
f.inbox = append(f.inbox, cas)
return sub
}
// note: callers must hold f.mu
func (f *Feed) typecheck(typ reflect.Type) bool {
if f.etype == nil {
f.etype = typ
return true
}
// In the event the feed's type is an actual interface, we
// perform an interface conformance check here.
if f.etype.Kind() == reflect.Interface && typ.Implements(f.etype) {
return true
}
return f.etype == typ
}
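The interface branch in typecheck above extends the upstream go-ethereum feed: once the feed's element type is fixed to an interface by the first Subscribe, Send accepts any concrete value that implements it. A minimal sketch assuming it lives in this event package; the notifier and ping types are hypothetical:

type notifier interface{ Notify() }

type ping struct{}

func (ping) Notify() {}

func interfaceFeedSketch() int {
	var f Feed
	ch := make(chan notifier, 1) // buffered so Send does not block on this subscriber
	f.Subscribe(ch)              // fixes the feed's element type to the notifier interface
	return f.Send(ping{})        // ping implements notifier, so this delivers and returns 1
}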
func (f *Feed) remove(sub *feedSub) {
// Delete from inbox first, which covers channels
// that have not been added to f.sendCases yet.
ch := sub.channel.Interface()
f.mu.Lock()
index := f.inbox.find(ch)
if index != -1 {
f.inbox = f.inbox.delete(index)
f.mu.Unlock()
return
}
f.mu.Unlock()
select {
case f.removeSub <- ch:
// Send will remove the channel from f.sendCases.
case <-f.sendLock:
// No Send is in progress, delete the channel now that we have the send lock.
f.sendCases = f.sendCases.delete(f.sendCases.find(ch))
f.sendLock <- struct{}{}
}
}
// Send delivers to all subscribed channels simultaneously.
// It returns the number of subscribers that the value was sent to.
func (f *Feed) Send(value interface{}) (nsent int) {
rvalue := reflect.ValueOf(value)
f.once.Do(f.init)
<-f.sendLock
// Add new cases from the inbox after taking the send lock.
f.mu.Lock()
f.sendCases = append(f.sendCases, f.inbox...)
f.inbox = nil
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()
// Set the sent value on all channels.
for i := firstSubSendCase; i < len(f.sendCases); i++ {
f.sendCases[i].Send = rvalue
}
// Send until all channels except removeSub have been chosen. 'cases' tracks a prefix
// of sendCases. When a send succeeds, the corresponding case moves to the end of
// 'cases' and it shrinks by one element.
cases := f.sendCases
for {
// Fast path: try sending without blocking before adding to the select set.
// This should usually succeed if subscribers are fast enough and have free
// buffer space.
for i := firstSubSendCase; i < len(cases); i++ {
if cases[i].Chan.TrySend(rvalue) {
nsent++
cases = cases.deactivate(i)
i--
}
}
if len(cases) == firstSubSendCase {
break
}
// Select on all the receivers, waiting for them to unblock.
chosen, recv, _ := reflect.Select(cases)
if chosen == 0 /* <-f.removeSub */ {
index := f.sendCases.find(recv.Interface())
f.sendCases = f.sendCases.delete(index)
if index >= 0 && index < len(cases) {
// Shrink 'cases' too because the removed case was still active.
cases = f.sendCases[:len(cases)-1]
}
} else {
cases = cases.deactivate(chosen)
nsent++
}
}
// Forget about the sent value and hand off the send lock.
for i := firstSubSendCase; i < len(f.sendCases); i++ {
f.sendCases[i].Send = reflect.Value{}
}
f.sendLock <- struct{}{}
return nsent
}
type feedSub struct {
feed *Feed
channel reflect.Value
errOnce sync.Once
err chan error
}
// Unsubscribe removes the feed subscription.
func (sub *feedSub) Unsubscribe() {
sub.errOnce.Do(func() {
sub.feed.remove(sub)
close(sub.err)
})
}
// Err returns the subscription's error channel.
func (sub *feedSub) Err() <-chan error {
return sub.err
}
type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
}
// delete removes the given case from cs.
func (cs caseList) delete(index int) caseList {
return append(cs[:index], cs[index+1:]...)
}
// deactivate moves the case at index into the non-accessible portion of the cs slice.
func (cs caseList) deactivate(index int) caseList {
last := len(cs) - 1
cs[index], cs[last] = cs[last], cs[index]
return cs[:last]
}
// func (cs caseList) String() string {
// s := "["
// for i, cas := range cs {
// if i != 0 {
// s += ", "
// }
// switch cas.Dir {
// case reflect.SelectSend:
// s += fmt.Sprintf("%v<-", cas.Chan.Interface())
// case reflect.SelectRecv:
// s += fmt.Sprintf("<-%v", cas.Chan.Interface())
// }
// }
// return s + "]"
// }
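Putting the pieces above together, a minimal usage sketch of the feed API; the import path github.com/prysmaticlabs/prysm/v5/async/event and the blockEvent type are illustrative assumptions, not taken from this diff:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/async/event"
)

type blockEvent struct{ slot uint64 }

func main() {
	var feed event.Feed // the zero value is ready to use

	ch := make(chan blockEvent, 1) // ample buffer keeps Send from blocking on us
	sub := feed.Subscribe(ch)      // the first Subscribe fixes the feed's type to blockEvent
	defer sub.Unsubscribe()        // always release the subscription

	n := feed.Send(blockEvent{slot: 42}) // delivered to every subscribed channel
	fmt.Println(n, (<-ch).slot)          // 1 42
}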

509
async/event/feed_test.go Normal file
View File

@@ -0,0 +1,509 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"fmt"
"reflect"
"sync"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
)
func TestFeedPanics(t *testing.T) {
{
var f Feed
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed
ch := make(chan int)
f.Subscribe(ch)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed
f.Send(2)
want := feedTypeError{op: "Subscribe", got: reflect.TypeOf(make(chan uint64)), want: reflect.TypeOf(make(chan<- int))}
assert.NoError(t, checkPanic(want, func() { f.Subscribe(make(chan uint64)) }))
}
{
var f Feed
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(make(<-chan int)) }))
}
{
var f Feed
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(0) }))
}
}
func checkPanic(want error, fn func()) (err error) {
defer func() {
panicResult := recover()
if panicResult == nil {
err = fmt.Errorf("didn't panic")
} else if !reflect.DeepEqual(panicResult, want) {
err = fmt.Errorf("panicked with wrong error: got %q, want %q", panicResult, want)
}
}()
fn()
return nil
}
func TestFeed(t *testing.T) {
var feed Feed
var done, subscribed sync.WaitGroup
subscriber := func(i int) {
defer done.Done()
subchan := make(chan int)
sub := feed.Subscribe(subchan)
timeout := time.NewTimer(2 * time.Second)
subscribed.Done()
select {
case v := <-subchan:
if v != 1 {
t.Errorf("%d: received value %d, want 1", i, v)
}
case <-timeout.C:
t.Errorf("%d: receive timeout", i)
}
sub.Unsubscribe()
select {
case _, ok := <-sub.Err():
if ok {
t.Errorf("%d: error channel not closed after unsubscribe", i)
}
case <-timeout.C:
t.Errorf("%d: unsubscribe timeout", i)
}
}
const n = 1000
done.Add(n)
subscribed.Add(n)
for i := 0; i < n; i++ {
go subscriber(i)
}
subscribed.Wait()
if nsent := feed.Send(1); nsent != n {
t.Errorf("first send delivered %d times, want %d", nsent, n)
}
if nsent := feed.Send(2); nsent != 0 {
t.Errorf("second send delivered %d times, want 0", nsent)
}
done.Wait()
}
func TestFeedSubscribeSameChannel(t *testing.T) {
var (
feed Feed
done sync.WaitGroup
ch = make(chan int)
sub1 = feed.Subscribe(ch)
sub2 = feed.Subscribe(ch)
_ = feed.Subscribe(ch)
)
expectSends := func(value, n int) {
if nsent := feed.Send(value); nsent != n {
t.Errorf("send delivered %d times, want %d", nsent, n)
}
done.Done()
}
expectRecv := func(wantValue, n int) {
for i := 0; i < n; i++ {
if v := <-ch; v != wantValue {
t.Errorf("received %d, want %d", v, wantValue)
}
}
}
done.Add(1)
go expectSends(1, 3)
expectRecv(1, 3)
done.Wait()
sub1.Unsubscribe()
done.Add(1)
go expectSends(2, 2)
expectRecv(2, 2)
done.Wait()
sub2.Unsubscribe()
done.Add(1)
go expectSends(3, 1)
expectRecv(3, 1)
done.Wait()
}
func TestFeedSubscribeBlockedPost(_ *testing.T) {
var (
feed Feed
nsends = 2000
ch1 = make(chan int)
ch2 = make(chan int)
wg sync.WaitGroup
)
defer wg.Wait()
feed.Subscribe(ch1)
wg.Add(nsends)
for i := 0; i < nsends; i++ {
go func() {
feed.Send(99)
wg.Done()
}()
}
sub2 := feed.Subscribe(ch2)
defer sub2.Unsubscribe()
// We're done when ch1 has received N times.
// The number of receives on ch2 depends on scheduling.
for i := 0; i < nsends; {
select {
case <-ch1:
i++
case <-ch2:
}
}
}
func TestFeedUnsubscribeBlockedPost(_ *testing.T) {
var (
feed Feed
nsends = 200
chans = make([]chan int, 2000)
subs = make([]Subscription, len(chans))
bchan = make(chan int)
bsub = feed.Subscribe(bchan)
wg sync.WaitGroup
)
for i := range chans {
chans[i] = make(chan int, nsends)
}
// Queue up some Sends. None of these can make progress while bchan isn't read.
wg.Add(nsends)
for i := 0; i < nsends; i++ {
go func() {
feed.Send(99)
wg.Done()
}()
}
// Subscribe the other channels.
for i, ch := range chans {
subs[i] = feed.Subscribe(ch)
}
// Unsubscribe them again.
for _, sub := range subs {
sub.Unsubscribe()
}
// Unblock the Sends.
bsub.Unsubscribe()
wg.Wait()
}
// Checks that unsubscribing a channel during Send works even if that
// channel has already been sent on.
func TestFeedUnsubscribeSentChan(_ *testing.T) {
var (
feed Feed
ch1 = make(chan int)
ch2 = make(chan int)
sub1 = feed.Subscribe(ch1)
sub2 = feed.Subscribe(ch2)
wg sync.WaitGroup
)
defer sub2.Unsubscribe()
wg.Add(1)
go func() {
feed.Send(0)
wg.Done()
}()
// Wait for the value on ch1.
<-ch1
// Unsubscribe ch1, removing it from the send cases.
sub1.Unsubscribe()
// Receive ch2, finishing Send.
<-ch2
wg.Wait()
// Send again. This should send to ch2 only, so the wait group will unblock
// as soon as a value is received on ch2.
wg.Add(1)
go func() {
feed.Send(0)
wg.Done()
}()
<-ch2
wg.Wait()
}
func TestFeedUnsubscribeFromInbox(t *testing.T) {
var (
feed Feed
ch1 = make(chan int)
ch2 = make(chan int)
sub1 = feed.Subscribe(ch1)
sub2 = feed.Subscribe(ch1)
sub3 = feed.Subscribe(ch2)
)
assert.Equal(t, 3, len(feed.inbox))
assert.Equal(t, 1, len(feed.sendCases), "sendCases should only contain the removeSub case before Send")
sub1.Unsubscribe()
sub2.Unsubscribe()
sub3.Unsubscribe()
assert.Equal(t, 0, len(feed.inbox), "Inbox is non-empty after unsubscribe")
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
}
func BenchmarkFeedSend1000(b *testing.B) {
var (
done sync.WaitGroup
feed Feed
nsubs = 1000
)
subscriber := func(ch <-chan int) {
for i := 0; i < b.N; i++ {
<-ch
}
done.Done()
}
done.Add(nsubs)
for i := 0; i < nsubs; i++ {
ch := make(chan int, 200)
feed.Subscribe(ch)
go subscriber(ch)
}
// The actual benchmark.
b.ResetTimer()
for i := 0; i < b.N; i++ {
if feed.Send(i) != nsubs {
panic("wrong number of sends")
}
}
b.StopTimer()
done.Wait()
}
func TestFeed_Send(t *testing.T) {
tests := []struct {
name string
evFeed *Feed
testSetup func(fd *Feed, t *testing.T, o interface{})
obj interface{}
expectPanic bool
}{
{
name: "normal struct",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedWithPointer, 1)
fd.Subscribe(testChan)
},
obj: testFeedWithPointer{
a: new(uint64),
b: new(string),
},
expectPanic: false,
},
{
name: "un-implemented interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface, 1)
fd.Subscribe(testChan)
},
obj: testFeedWithPointer{
a: new(uint64),
b: new(string),
},
expectPanic: true,
},
{
name: "semi-implemented interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface, 1)
fd.Subscribe(testChan)
},
obj: testFeed2{
a: 0,
b: "",
c: []byte{'A'},
},
expectPanic: true,
},
{
name: "fully-implemented interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface)
// Make it unbuffered to allow message to
// pass through
go func() {
a := <-testChan
if !reflect.DeepEqual(a, o) {
t.Errorf("Got = %v, want = %v", a, o)
}
}()
fd.Subscribe(testChan)
},
obj: testFeed{
a: 0,
b: "",
},
expectPanic: false,
},
{
name: "fully-implemented interface with additional methods",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface)
// Make it unbuffered to allow message to
// pass through
go func() {
a := <-testChan
if !reflect.DeepEqual(a, o) {
t.Errorf("Got = %v, want = %v", a, o)
}
}()
fd.Subscribe(testChan)
},
obj: testFeed3{
a: 0,
b: "",
c: []byte{'A'},
d: []byte{'B'},
},
expectPanic: false,
},
{
name: "concrete types implementing the same interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeed, 1)
// Buffered channel; the Send below is expected to
// panic on the concrete type mismatch before delivery
go func() {
a := <-testChan
if !reflect.DeepEqual(a, o) {
t.Errorf("Got = %v, want = %v", a, o)
}
}()
fd.Subscribe(testChan)
},
obj: testFeed3{
a: 0,
b: "",
c: []byte{'A'},
d: []byte{'B'},
},
expectPanic: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer func() {
if r := recover(); r != nil {
if !tt.expectPanic {
t.Errorf("panic triggered when unexpected: %v", r)
}
} else {
if tt.expectPanic {
t.Error("panic not triggered when expected")
}
}
}()
tt.testSetup(tt.evFeed, t, tt.obj)
if gotNsent := tt.evFeed.Send(tt.obj); gotNsent != 1 {
t.Errorf("Send() = %v, want %v", gotNsent, 1)
}
})
}
}
// The following is a collection of different
// struct types to test with.
type testFeed struct {
a uint64
b string
}
func (testFeed) method1() {
}
func (testFeed) method2() {
}
type testFeedWithPointer struct {
a *uint64
b *string
}
type testFeed2 struct {
a uint64
b string
c []byte
}
func (testFeed2) method1() {
}
type testFeed3 struct {
a uint64
b string
c, d []byte
}
func (testFeed3) method1() {
}
func (testFeed3) method2() {
}
func (testFeed3) method3() {
}
type testFeedIface interface {
method1()
method2()
}

View File

@@ -1,8 +0,0 @@
package event
// SubscriberSender is an abstract representation of an *event.Feed
// to use in describing types that accept or return an *event.Feed.
type SubscriberSender interface {
Subscribe(channel interface{}) Subscription
Send(value interface{}) (nsent int)
}

View File

@@ -28,6 +28,25 @@ import (
// request backoff time.
const waitQuotient = 10
// Subscription represents a stream of events. The carrier of the events is typically a
// channel, but isn't part of the interface.
//
// Subscriptions can fail while established. Failures are reported through an error
// channel. It receives a value if there is an issue with the subscription (e.g. the
// network connection delivering the events has been closed). Only one value will ever be
// sent.
//
// The error channel is closed when the subscription ends successfully (i.e. when the
// source of events is closed). It is also closed when Unsubscribe is called.
//
// The Unsubscribe method cancels the sending of events. You must call Unsubscribe in all
// cases to ensure that resources related to the subscription are released. It can be
// called any number of times.
type Subscription interface {
Err() <-chan error // returns the error channel
Unsubscribe() // cancels sending of events, closing the error channel
}
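A minimal consumer sketch for the contract above; the event type, channel size, and handler are illustrative placeholders:

func consume(sub Subscription, events <-chan int, handle func(int)) error {
	defer sub.Unsubscribe() // safe to call any number of times
	for {
		select {
		case ev := <-events:
			handle(ev)
		case err, ok := <-sub.Err():
			if !ok {
				return nil // channel closed: the subscription ended cleanly
			}
			return err // at most one failure value is ever sent
		}
	}
}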
// NewSubscription runs a producer function as a subscription in a new goroutine. The
// channel given to the producer is closed when Unsubscribe is called. If fn returns an
// error, it is sent on the subscription's error channel.
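A hedged producer sketch, assuming this package keeps the upstream go-ethereum signature NewSubscription(func(quit <-chan struct{}) error) Subscription and that the time package is imported:

func produceTicks(events chan<- struct{}) Subscription {
	return NewSubscription(func(quit <-chan struct{}) error {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				select {
				case events <- struct{}{}: // emit an event
				case <-quit:
					return nil
				}
			case <-quit:
				// quit is closed by Unsubscribe; returning nil closes Err().
				return nil
			}
		}
	})
}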

View File

@@ -13,6 +13,7 @@ go_library(
"head.go",
"head_sync_committee_info.go",
"init_sync_process_block.go",
"lightclient.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
@@ -25,7 +26,6 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -44,13 +44,10 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -67,7 +64,6 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -84,9 +80,10 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
@@ -99,6 +96,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
@@ -118,6 +116,7 @@ go_test(
"head_test.go",
"init_sync_process_block_test.go",
"init_test.go",
"lightclient_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
@@ -139,11 +138,9 @@ go_test(
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
@@ -160,7 +157,6 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -176,6 +172,7 @@ go_test(
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
@@ -184,7 +181,6 @@ go_test(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -2,7 +2,6 @@ package blockchain
import (
"io"
"os"
"testing"
"github.com/sirupsen/logrus"
@@ -12,5 +11,5 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
os.Exit(m.Run())
m.Run()
}

View File

@@ -6,18 +6,18 @@ import (
"time"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -45,12 +45,10 @@ type ForkchoiceFetcher interface {
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -204,7 +202,7 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.SetAttributes(trace.BoolAttribute("cache_hit", ok))
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headState(ctx), nil
@@ -226,7 +224,7 @@ func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconSt
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.SetAttributes(trace.BoolAttribute("cache_hit", ok))
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headStateReadOnly(ctx), nil
@@ -243,7 +241,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
if !s.hasHeadState() {
return []primitives.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
}
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -23,6 +22,13 @@ func (s *Service) GetProposerHead() [32]byte {
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// ShouldOverrideFCU returns the corresponding value from forkchoice
func (s *Service) ShouldOverrideFCU() bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
@@ -45,10 +51,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}
// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// ForkChoiceDump returns the corresponding value from forkchoice
@@ -93,10 +99,3 @@ func (s *Service) FinalizedBlockHash() [32]byte {
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
// ParentRoot wraps a call to the corresponding method in forkchoice
func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ParentRoot(root)
}

View File

@@ -38,7 +38,7 @@ func prepareForkchoiceState(
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, blocks.ROBlock, error) {
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
@@ -59,26 +59,7 @@ func prepareForkchoiceState(
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: payloadHash[:],
},
},
},
}
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
return st, blockRoot, err
}
func TestHeadRoot_Nil(t *testing.T) {
@@ -141,9 +122,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
@@ -335,24 +316,24 @@ func TestService_ChainHeads(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
@@ -432,12 +413,12 @@ func TestService_IsOptimistic(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -468,12 +449,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)

View File

@@ -33,7 +33,6 @@ var (
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.

View File

@@ -2,15 +2,13 @@ package blockchain
import (
"context"
"crypto/sha256"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -23,13 +21,15 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const blobCommitmentVersionKZG uint8 = 0x01
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -39,7 +39,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
if arg.headBlock == nil || arg.headBlock.IsNil() {
if arg.headBlock.IsNil() {
log.Error("Head block is nil")
return nil, nil
}
@@ -72,11 +72,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch {
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
switch err {
case execution.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
@@ -84,7 +83,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
case execution.ErrInvalidPayloadStatus:
forkchoiceUpdatedInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
@@ -140,6 +139,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
default:
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
@@ -171,38 +171,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
return payloadID, nil
}
func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
if err != nil {
log.WithError(err).
WithField("head_root", cfg.headRoot[:]).
Error("Could not get proposer index for PayloadAttributes event")
return
}
evd := payloadattribute.EventData{
ProposerIndex: pidx,
ProposalSlot: cfg.headState.Slot(),
ParentBlockRoot: cfg.headRoot[:],
Attributer: cfg.attributes,
HeadRoot: cfg.headRoot,
HeadState: cfg.headState,
HeadBlock: cfg.headBlock,
}
if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
headPayload, err := cfg.headBlock.Block().Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return
}
evd.ParentBlockHash = headPayload.BlockHash()
evd.ParentBlockNumber = headPayload.BlockNumber()
}
f.Send(&feed.Event{
Type: statefeed.PayloadAttributes,
Data: evd,
})
}
// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
@@ -252,40 +220,29 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
}
var lastValidHash []byte
var parentRoot *common.Hash
var versionedHashes []common.Hash
var requests *enginev1.ExecutionRequests
if blk.Version() >= version.Deneb {
var versionedHashes []common.Hash
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
if err != nil {
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
}
prh := common.Hash(blk.Block().ParentRoot())
parentRoot = &prh
pr := common.Hash(blk.Block().ParentRoot())
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
} else {
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
}
if blk.Version() >= version.Electra {
requests, err = blk.Block().Body().ExecutionRequests()
if err != nil {
return false, errors.Wrap(err, "could not get execution requests")
}
if requests == nil {
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
switch {
case err == nil:
switch err {
case nil:
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
case execution.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
case execution.ErrInvalidPayloadStatus:
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
@@ -365,16 +322,15 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
v := st.Version()
if v >= version.Deneb {
var attr payloadattribute.Attributer
switch st.Version() {
case version.Deneb:
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -385,18 +341,13 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
}
if v >= version.Capella {
case version.Capella:
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -406,12 +357,8 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
}
if v >= version.Bellatrix {
attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -420,12 +367,12 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return emptyAttri
}
log.WithField("version", version.String(st.Version())).Error("Could not get payload attribute due to unknown state version")
return emptyAttri
return attr
}
// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
@@ -456,7 +403,13 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([
versionedHashes := make([]common.Hash, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
}
return versionedHashes, nil
}
func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
versionedHash := sha256.Sum256(commitment)
versionedHash[0] = blobCommitmentVersionKZG
return versionedHash
}

View File

@@ -855,63 +855,41 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
require.Equal(t, 0, len(a))
}
func Test_GetPayloadAttributeV3(t *testing.T) {
var testCases = []struct {
name string
st bstate.BeaconState
}{
{
name: "deneb",
st: func() bstate.BeaconState {
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
return st
}(),
},
{
name: "electra",
st: func() bstate.BeaconState {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
return st
}(),
},
}
func Test_GetPayloadAttributeDeneb(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
attrV3, err := attr.PbV3()
require.NoError(t, err)
hr := service.headRoot()
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
attrV3, err := attr.PbV3()
require.NoError(t, err)
hr := service.headRoot()
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
})
}
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
@@ -1135,14 +1113,9 @@ func TestComputePayloadAttribute(t *testing.T) {
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
ctx: ctx,
blockRoot: [32]byte{'a'},
}
fcu := &fcuConfig{
headState: st,

View File

@@ -12,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewHead(r [32]byte) bool {

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -19,11 +18,11 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
@@ -405,19 +404,13 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.AttestationCache.Add(a); err != nil {
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if a.IsAggregated() {
if err = s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err = s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"kzg.go",
"trusted_setup.go",
"validation.go",
],
@@ -13,9 +12,6 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -30,7 +26,8 @@ go_test(
deps = [
"//consensus-types/blocks:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,111 +0,0 @@
package kzg
import (
"errors"
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
)
// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob
// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte
// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell
// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
// Commitment represent a KZG commitment to a Blob.
type Commitment [48]byte
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte
// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48
// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32
// CellsAndProofs represents the Cells and Proofs corresponding to
// a single blob.
type CellsAndProofs struct {
Cells []Cell
Proofs []Proof
}
func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
kzgBlob := kzg4844.Blob(*blob)
comm, err := kzg4844.BlobToCommitment(&kzgBlob)
if err != nil {
return Commitment{}, err
}
return Commitment(comm), nil
}
func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
kzgBlob := kzg4844.Blob(*blob)
proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
if err != nil {
return [48]byte{}, err
}
return Proof(proof), nil
}
func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
ckzgBlob := (*ckzg4844.Blob)(blob)
ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
if err != nil {
return CellsAndProofs{}, err
}
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}
func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
for i := range partialCells {
ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
}
ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
if err != nil {
return CellsAndProofs{}, err
}
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}
// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
if len(ckzgCells) != len(ckzgProofs) {
return CellsAndProofs{}, errors.New("different number of cells/proofs")
}
var cells []Cell
var proofs []Proof
for i := range ckzgCells {
cells = append(cells, Cell(ckzgCells[i]))
proofs = append(proofs, Proof(ckzgProofs[i]))
}
return CellsAndProofs{
Cells: cells,
Proofs: proofs,
}, nil
}

View File

@@ -5,8 +5,6 @@ import (
"encoding/json"
GoKZG "github.com/crate-crypto/go-kzg-4844"
CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
)
@@ -14,53 +12,17 @@ var (
//go:embed trusted_setup.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context
kzgLoaded bool
)
type TrustedSetup struct {
G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
}
func Start() error {
trustedSetup := &TrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
parsedSetup := GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
if err != nil {
return errors.Wrap(err, "could not parse trusted setup JSON")
}
kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
SetupG2: trustedSetup.G2Monomial[:],
SetupG1Lagrange: trustedSetup.G1Lagrange})
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
if err != nil {
return errors.Wrap(err, "could not initialize go-kzg context")
}
// Length of a G1 point, converted from hex to binary.
g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
for i, g1 := range &trustedSetup.G1Monomial {
copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
}
// Length of a G1 point, converted from hex to binary.
g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
for i, g1 := range &trustedSetup.G1Lagrange {
copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
}
// Length of a G2 point, converted from hex to binary.
g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
for i, g2 := range &trustedSetup.G2Monomial {
copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
}
if !kzgLoaded {
// TODO: Provide a configuration option for this.
var precompute uint = 8
// Free the current trusted setup before running this method. CKZG
// panics if the same setup is run multiple times.
if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
panic(err)
}
}
kzgLoaded = true
return nil
}

File diff suppressed because it is too large

View File

@@ -20,17 +20,16 @@ func Verify(sidecars ...blocks.ROBlob) error {
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
blobs[i] = bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
func bytesToBlob(blob []byte) *GoKZG.Blob {
var ret GoKZG.Blob
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
copy(ret[:], blob)
return &ret
return
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {

View File

@@ -1,20 +1,57 @@
package kzg
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"testing"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/sirupsen/logrus"
)
func deterministicRandomness(seed int64) [32]byte {
// Converts an int64 to a byte slice
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, seed)
if err != nil {
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
return [32]byte{}
}
bytes := buf.Bytes()
return sha256.Sum256(bytes)
}
// Returns a serialized random field element in big-endian
func GetRandFieldElement(seed int64) [32]byte {
bytes := deterministicRandomness(seed)
var r fr.Element
r.SetBytes(bytes[:])
return GoKZG.SerializeScalar(r)
}
// Returns a random blob using the passed seed as entropy
func GetRandBlob(seed int64) GoKZG.Blob {
var blob GoKZG.Blob
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
fieldElementBytes := GetRandFieldElement(seed + int64(i))
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
}
return blob
}
func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
commitment, err := kzgContext.BlobToKZGCommitment(&blob, 0)
commitment, err := kzgContext.BlobToKZGCommitment(blob, 0)
if err != nil {
return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
}
proof, err := kzgContext.ComputeBlobKZGProof(&blob, commitment, 0)
proof, err := kzgContext.ComputeBlobKZGProof(blob, commitment, 0)
if err != nil {
return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
}
@@ -31,13 +68,13 @@ func TestBytesToAny(t *testing.T) {
blob := GoKZG.Blob{0x01, 0x02}
commitment := GoKZG.KZGCommitment{0x01, 0x02}
proof := GoKZG.KZGProof{0x01, 0x02}
require.DeepEqual(t, blob, *bytesToBlob(bytes))
require.DeepEqual(t, blob, bytesToBlob(bytes))
require.DeepEqual(t, commitment, bytesToCommitment(bytes))
require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}
func TestGenerateCommitmentAndProof(t *testing.T) {
blob := util.GetRandBlob(123)
blob := GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}

Some files were not shown because too many files have changed in this diff