Compare commits


54 Commits

Author SHA1 Message Date
Raul Jordan
b5bd461627 minimal node/tree backing implementation 2021-11-20 12:22:10 -05:00
Raul Jordan
8fecc5af7f builds 2021-11-20 01:44:35 -05:00
Raul Jordan
03e40edf2c refactor 2021-11-20 01:02:34 -05:00
Raul Jordan
7a2b8e4e6a Merge branch 'develop' into light 2021-11-01 22:14:13 -04:00
Raul Jordan
e13cdf493e empt 2021-10-31 13:25:14 -04:00
Raul Jordan
fdd9c535b4 viz 2021-10-31 13:20:46 -04:00
Raul Jordan
ba728d4929 gaz 2021-10-31 12:20:01 -05:00
Raul Jordan
cd5eb0a2ef light go 2021-10-31 13:19:18 -04:00
Raul Jordan
6d44428e9c build protos 2021-10-31 12:18:53 -05:00
Raul Jordan
cefb5cec55 dep 2021-10-31 12:06:51 -05:00
Raul Jordan
815debee38 p 2021-10-31 13:00:50 -04:00
Raul Jordan
fc6c17cc75 p 2021-10-31 13:00:18 -04:00
Raul Jordan
df0e9fa3d7 dep 2021-10-31 12:59:35 -04:00
Raul Jordan
0287bc65c7 add build files 2021-10-31 11:59:13 -05:00
Raul Jordan
35c3225579 rem build 2021-10-31 12:53:42 -04:00
Raul Jordan
f238f872a1 use les 2021-10-31 12:48:44 -04:00
Raul Jordan
24f105b804 light 2021-10-30 22:58:54 -04:00
Raul Jordan
f98354f59f handle public key 2021-10-30 22:52:34 -04:00
Raul Jordan
2aea4e49f4 gen 2021-10-30 22:41:33 -04:00
Raul Jordan
a8e8338973 add 2021-10-30 22:36:30 -04:00
Raul Jordan
ed78d15ed6 check nil 2021-10-30 22:20:48 -04:00
Raul Jordan
d5cf0a2e54 blk 2021-10-30 22:19:40 -04:00
Raul Jordan
b89bb3fa30 genesis 2021-10-30 22:15:07 -04:00
Raul Jordan
be722f2c5c compiles 2021-10-30 22:07:39 -04:00
Raul Jordan
09c99b25bc light server updates 2021-10-30 22:02:32 -04:00
Raul Jordan
24ff40fbf5 regen 2021-10-30 17:02:20 -05:00
Raul Jordan
d96491ffa9 service wait for sync 2021-10-30 17:59:23 -04:00
Raul Jordan
5121a50bb4 service definitions 2021-10-30 17:54:54 -04:00
Raul Jordan
1c6d914ea1 add in functional endpoints 2021-10-30 17:36:24 -04:00
Raul Jordan
71adada879 database additions 2021-10-30 17:30:28 -04:00
Raul Jordan
5790aa66e0 light updater builds 2021-10-30 17:06:37 -04:00
Raul Jordan
8e6bb39d2f add in generated 2021-10-30 15:58:39 -05:00
Raul Jordan
ad06914f45 no expr 2021-10-30 16:56:17 -04:00
Raul Jordan
259e07d5c9 Merge branch 'light' of github.com:prysmaticlabs/prysm into light 2021-10-30 16:55:31 -04:00
Raul Jordan
f6cf77acd8 fix 2021-10-30 16:55:27 -04:00
Raul Jordan
40a36fb02d workspace 2021-10-30 15:54:31 -05:00
Raul Jordan
2e65be12b8 protos 2021-10-30 16:48:22 -04:00
Raul Jordan
4a237e11bc sync dev 2021-10-30 16:28:10 -04:00
Raul Jordan
4d9eafe110 add proto 2021-10-24 22:28:16 -04:00
Raul Jordan
7b1b9a564b type align 2021-10-24 22:22:41 -04:00
Raul Jordan
341cced53f more changes to updater 2021-10-24 22:17:47 -04:00
Raul Jordan
a3ad254b78 updater changes 2021-10-21 09:57:40 -04:00
Raul Jordan
a7fc25f2e0 start les update 2021-10-18 23:00:03 -04:00
Raul Jordan
9551a6c4b8 begin updater server 2021-10-18 00:18:49 -04:00
Raul Jordan
d39113af60 add in generated ssz tree 2021-10-18 00:05:57 -04:00
Raul Jordan
d27334746b attempt 2021-10-11 00:28:24 -04:00
Raul Jordan
d00c7a0ce8 add in test 2021-10-11 00:23:29 -04:00
Raul Jordan
c7826856a5 check in ferran ssz 2021-10-11 00:15:24 -04:00
Raul Jordan
1bec9ae9e6 add in generated ssz 2021-10-11 00:11:39 -04:00
Raul Jordan
17ed9356ff client server files 2021-10-09 17:10:51 -04:00
Raul Jordan
c493290027 split up files 2021-10-09 17:08:16 -04:00
Raul Jordan
30c07a8a1a les server running 2021-10-08 17:33:26 -04:00
Raul Jordan
b7fb8a8dcd begin les 2021-10-06 13:07:52 -04:00
Raul Jordan
bd108c3244 start les 2021-10-04 09:36:41 -04:00
2845 changed files with 89876 additions and 231635 deletions

189
.bazelrc

@@ -1,9 +1,9 @@
# Import bazelrc presets
import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# Print warnings for tests with inappropriate test size or timeout.
test --test_verbose_timeout_warnings
# Only build test targets when running bazel test //...
test --build_tests_only
test --test_output=errors
# E2E run with debug gotag
test:e2e --define gotags=debug
@@ -11,10 +11,26 @@ test:e2e --define gotags=debug
# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1
# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
build --host_force_python=PY2
test --host_force_python=PY2
run --host_force_python=PY2
# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --stamp
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env
build --define blst_disabled=false
test --define blst_disabled=false
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -25,14 +41,13 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --stamp
build:release --config=llvm
# LLVM compiler for building C/C++ dependencies.
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
build:llvm --incompatible_enable_cc_toolchain_resolution
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
@@ -47,7 +62,20 @@ build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
build:fuzz --define=gotags=libfuzzer
build:fuzz --config=llvm-asan
build:fuzz --copt=-fsanitize=fuzzer-no-link
build:fuzz --linkopt=-fsanitize=fuzzer
build:fuzz --copt=-fno-omit-frame-pointer
build:fuzz --define=FUZZING_ENGINE=libfuzzer
build:fuzz --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
build:fuzz --linkopt -Wl,--no-as-needed
build:fuzz --define=gc_goopts=-d=libfuzzer,checkptr
build:fuzz --run_under=//tools:fuzz_wrapper
build:fuzz --compilation_mode=opt
build:fuzz --define=blst_disabled=true
test:fuzz --local_test_jobs="HOST_CPUS*.5"
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
@@ -56,6 +84,36 @@ build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# multi-arch cross-compiling toolchain configs:
#-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
# osx_amd64 config for cross compiler toolchain
build:osx_amd64 --config=cross
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
build:osx_amd64 --compiler=osxcross
# windows
build:windows_amd64 --config=cross
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
build:windows_amd64 --compiler=mingw-w64
# linux_arm64 conifg for cross compiler toolchain
build:linux_arm64 --config=cross
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
build:linux_arm64 --copt=-funsafe-math-optimizations
build:linux_arm64 --copt=-ftree-vectorize
build:linux_arm64 --copt=-fomit-frame-pointer
build:linux_arm64 --cpu=aarch64
build:linux_arm64 --compiler=clang
build:linux_arm64 --copt=-march=armv8-a
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
@@ -63,5 +121,112 @@ build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# Set bazel gotag
build --define gotags=bazel
# windows debug
build:windows_amd64_debug --config=windows_amd64
build:windows_amd64_debug --config=debug
# osx_amd64 debug config
build:osx_amd64_debug --config=debug
build:osx_amd64_debug --config=osx_amd64
# linux_arm64_debug
build:linux_arm64_debug --config=linux_arm64
build:linux_arm64_debug --config=debug
# linux_amd64_debug
build:linux_amd64_debug --config=linux_amd64
build:linux_amd64_debug --config=debug
# Docker Sandbox Configs
#-----------------------
# Note all docker sandbox configs must run from a linux x86_64 host
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
# windows_amd64 docker sandbox build config
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
# osx_amd64 docker sandbox build config
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
# linux_arm64 docker sandbox build config
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
# linux_amd64 docker sandbox build config
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
# Remote Build Execution
#-----------------------
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
#
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=50
# Set several flags related to specifying the platform, toolchain and java
# properties.
# These flags should only be used as is for the rbe-ubuntu16-04 container
# and need to be adapted to work with other toolchain containers.
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
build:remote --platforms=@rbe_ubuntu_clang//config:platform
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
# defined. See https://github.com/bazelbuild/bazel/issues/7480
build:remote --define=EXECUTOR=remote
# Enable remote execution so actions are performed on the remote systems.
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote --incompatible_strict_action_env=true
# Set a higher timeout value, just in case.
build:remote --remote_timeout=3600
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
# build:remote --google_default_credentials=true
# Enable build without the bytes
# See: https://github.com/bazelbuild/bazel/issues/6862
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
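
The named configs above are composed on the bazel command line rather than applied automatically. A minimal sketch of how they might be combined, using targets that appear elsewhere in this changeset (illustrative invocations, not part of the diff):

```
# Run the test suite with the e2e gotag defined by test:e2e above.
bazel test //... --config=e2e

# Optimized, stamped release build; build:release chains to the llvm config.
bazel build //beacon-chain --config=release

# Cross-compile for linux_arm64 inside the docker sandbox (linux x86_64 host only).
bazel build //beacon-chain --config=linux_arm64_docker
```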


@@ -1 +1 @@
6.1.0
4.2.1


@@ -14,33 +14,36 @@ build:remote-cache --remote_download_toplevel
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
build:remote-cache --experimental_action_cache_store_output_metadata
build:remote-cache --experimental_remote_cache_compression
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true
build --experimental_use_hermetic_linux_sandbox
# Import workspace options.
import %workspace%/.bazelrc
startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc
build --show_progress_rate_limit=5
build --curses=no --color=no
build --curses=yes --color=no
build --keep_going
build --test_output=errors
build --flaky_test_attempts=5
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race
# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh


@@ -11,7 +11,7 @@ name = "go"
enabled = true
[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm/v4"]
import_paths = ["github.com/prysmaticlabs/prysm"]
[[analyzers]]
name = "test-coverage"

2
.github/CODEOWNERS vendored

@@ -5,4 +5,4 @@
*.bzl @prestonvanloon
# Anyone on prylabs team can approve dependency updates.
deps.bzl @prysmaticlabs/core-team
deps.bzl @prysmaticlabs/core-team


@@ -16,12 +16,12 @@ Existing issues often contain information about workarounds, resolution, or prog
### Description
<!-- ✍️ A clear and concise description of the problem or missing capability... -->
<!-- ✍️--> A clear and concise description of the problem or missing capability...
### Describe the solution you'd like
<!-- ✍️ If you have a solution in mind, please describe it. -->
<!-- ✍️--> If you have a solution in mind, please describe it.
### Describe alternatives you've considered
<!-- ✍️ Have you considered any alternative solutions or workarounds? -->
<!-- ✍️--> Have you considered any alternative solutions or workarounds?

5
.github/actions/gofmt/Dockerfile vendored Normal file

@@ -0,0 +1,5 @@
FROM cytopia/gofmt
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

12
.github/actions/gofmt/action.yml vendored Normal file

@@ -0,0 +1,12 @@
name: 'Gofmt checker'
description: 'Checks that all project files have been properly formatted.'
inputs:
path:
description: 'Path to check'
required: true
default: './'
runs:
using: 'docker'
image: 'Dockerfile'
args:
- ${{ inputs.path }}

15
.github/actions/gofmt/entrypoint.sh vendored Executable file

@@ -0,0 +1,15 @@
#!/bin/sh -l
set -e
cd $GITHUB_WORKSPACE
# Check if any files are not formatted.
nonformatted="$(gofmt -l $1 2>&1)"
# Return if `go fmt` passes.
[ -z "$nonformatted" ] && exit 0
# Notify of issues with formatting.
echo "Following files need to be properly formatted:"
echo "$nonformatted"
exit 1
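
The action above simply surfaces `gofmt -l` output as a CI failure. The same check can be reproduced locally before pushing (illustrative command; `./` mirrors the action's default path input):

```
# Lists files that gofmt would reformat; empty output means the tree is clean.
gofmt -l ./
```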


@@ -1,13 +1,13 @@
#!/bin/sh -l
set -e
export PATH="$PATH:/usr/local/go/bin"
export PATH=$PATH:/usr/local/go/bin
cd "$GITHUB_WORKSPACE"
cd $GITHUB_WORKSPACE
cp go.mod go.mod.orig
cp go.sum go.sum.orig
go mod tidy -compat=1.17
go mod tidy
echo "Checking go.mod and go.sum:"
checks=0


@@ -0,0 +1,41 @@
name: Update DAppNodePackages
on:
push:
tags:
- '*'
jobs:
dappnode-update-beacon-chain:
name: Trigger a beacon-chain release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage-prysm-beacon-chain
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-beacon-chain
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches
dappnode-update-validator:
name: Trigger a validator release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage validator repository
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-validator
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches


@@ -7,53 +7,39 @@ on:
branches: [ '*' ]
jobs:
formatting:
name: Formatting
check:
name: Check
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v1
- name: Go mod tidy checker
id: gomodtidy
uses: ./.github/actions/gomodtidy
gosec:
name: Gosec scan
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
uses: actions/setup-go@v3
- name: Gofmt checker
id: gofmt
uses: ./.github/actions/gofmt
with:
go-version: 1.19
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
path: ./
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
uses: actions/setup-go@v3
- name: GoImports checker
id: goimports
uses: Jerome1337/goimports-action@v1.0.2
with:
go-version: 1.19
id: go
goimports-path: ./
- name: Gosec security scanner
uses: securego/gosec@master
with:
args: '-exclude=G307 -exclude-dir=crypto/bls/herumi ./...'
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v2
with:
version: v1.50.1
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go
build:
name: Build
@@ -62,7 +48,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: ^1.14
id: go
- name: Check out code into the Go module directory
@@ -75,14 +61,6 @@ jobs:
- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
# Tests run via Bazel for now...
# - name: Test


@@ -1,22 +0,0 @@
name: Horusec Security Scan
on:
schedule:
# Runs cron at 16.00 UTC on
- cron: '0 0 * * SUN'
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code
uses: actions/checkout@v2
with: # Required when commit authors is enabled
fetch-depth: 0
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

13
.gitignore vendored

@@ -30,14 +30,9 @@ password.txt
# Dist files
dist
# libfuzzer
oom-*
crash-*
# deepsource cli
bin
# p2p metaData
metaData
# execution API authentication
jwt.hex
# manual testing
tmp


@@ -1,32 +1,69 @@
run:
skip-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.19'
linters-settings:
govet:
check-shadowing: true
settings:
printf:
funcs:
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
depguard:
list-type: blacklist
packages:
# logging is allowed only by logutils.Log, logrus
# is allowed to use only in logutils package
- github.com/sirupsen/logrus
misspell:
locale: US
lll:
line-length: 140
goimports:
local-prefixes: github.com/golangci/golangci-lint
gocritic:
enabled-tags:
- performance
- style
- experimental
disabled-checks:
- wrapperFunc
linters:
disable-all: true
enable:
- gofmt
- deadcode
- goconst
- goimports
- unused
- errcheck
- gosimple
- gocognit
- dupword
- nilerr
- whitespace
- golint
- gosec
- misspell
- structcheck
- typecheck
- unparam
- varcheck
- gofmt
- unused
disable-all: true
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65
run:
skip-dirs:
- proto/
- ^contracts/
deadline: 10m
output:
print-issued-lines: true
sort-results: true
# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
service:
golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
prepare:
- echo "here I can run custom commands, but no preparation needed for this repo"
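
Either version of this config is consumed the same way by the linter. A sketch of running it locally against the checked-in config (illustrative; assumes a golangci-lint binary compatible with the settings above):

```
# Run the linters defined in .golangci.yml with the 10m timeout the config specifies.
golangci-lint run --config=.golangci.yml --timeout=10m
```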


@@ -5,22 +5,21 @@ Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
Encryption: openpgp4fpr:79C59A585E3FD3AFFA00F5C22940A6479DA7C9EC
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAmGOfiYACgkQcuM+TfGl
A24YwRAAiQk3w6yzqSEggrOlNoNn04iu/rWZdn5ihkQgzACXy8XH2D1gdKLChE/X
7e5bUtgE2aCuHryQjwoKxqZakviBJFstVmHgF64rXv2zKhpqA30Mj4fI+T3zn8I+
+FpFV0TTsxNLDx+AcR1eQ1nSayO7ImUDIfOQNDDnSZZy42Bc+F+QIGKB3aH/8bpG
kT+bDTZrXvX+TE1gZTbAtZG8sH8g/zadoWEHIhfXUuYb0kTz+DRzAxoqU4j4Z4ee
1zSfFAgfJwxJP4kWD7s4xkE1sBbCgGBeD6cW/C2lbcfIei+XSizLpHW3jD9dNqh4
fLkmEspSa/LV/iXFq8nFzu/GLww4q+sQZDzzDKZyws54CrATinRitZMhzoIL0bTn
yFZVOGHosFAMEVZ36dl1Aw2+B2W6tr2CVr9c5zfV+kup5/KZH1EmT5nYY/zFwfg2
jYCFB5wmYeiyWZvuprgJXRArgVZLZaJxwWazlPVk4i/4vPvRgvfHqOwHCBe8DXy0
VHPhpewwb/ECYek1KoaNQflgR8iH2GMHkC5RjhGDAt1S0AQDtite5m4ZYt1kvO9E
k/znkv89dduhL9CKDvZvnI+DICwsTrf//4KJ8PM/qaPAJa4GvtiUU/eS/jKBivtv
OP5dZQtX6KPc9ewqqZgn622uHSezoBidgeTkdZsJ6tw2eIu0lsY=
=V7L0
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl++klgACgkQcuM+TfGl
A27rQw/6A29p1W20J0v+h218p8XWLSUpTIGLnZTxw6KqdyVXMzlsQK0YG4G2s2AB
0LKh7Ae/Di5E0U+Z4AjUW5nc5eaCxK36GMscH9Ah0rgJwNYxEJw7/2o8ZqVT/Ip2
+56rFihRqxFZfaCNKFVuZFaL9jKewV9FKYP38ID6/SnTcrOHiu2AoAlyZGmB03p+
iT57SPRHatygeY4xb/gwcfREFWEv+VHGyBTv8A+6ABZDxyurboCFMERHzFICrbmk
8UdHxxlWZDnHAbAUyAwpERC5znx6IHXQJwF8TMtu6XY6a6axT2XBOyJDF9/mZOz+
kdkz6loX5uxaQBGLtTv6Kqf1yUGANOZ16VhHvWwL209LmHmigIVQ+qSM6c79PsW/
vrsqdz3GBsiMC5Fq2vYgnbgzpfE8Atjn0y7E+j4R7IvwOAE/Ro/b++nqnc4YqhME
P/yTcfGftaCrdSNnQCXeoV9JxpFM5Xy8KV3eexvNKbcgA/9DtgxL5i+s5ZJkUT9A
+qJvoRrRyIym32ghkHgtFJKB3PLCdobeoOVRk6EnMo9zKSiSK2rZEJW8Ccbo515D
W9qUOn3GF7lNVuUFAU/YKEdmDp/AVaViZ7vH+8aq0LC0HBkZ8XlzWnWoArS8sMhw
fX0R9g/HMgrwNte/d0mwim5lJ2Plgv60Bh4grJqwZJeWbU0zi1U=
=uW+X
-----END PGP SIGNATURE-----


@@ -3,7 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"
@@ -12,12 +12,10 @@ exports_files([
"LICENSE.md",
])
# gazelle:prefix github.com/prysmaticlabs/prysm/v4
# gazelle:prefix github.com/prysmaticlabs/prysm
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
name = "gazelle",
prefix = prefix,
@@ -88,54 +86,50 @@ nogo(
config = "nogo_config.json",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
"@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
"@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
# "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
"@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/cryptorand:go_default_library",
"//tools/analyzers/errcheck:go_default_library",
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/unreachable:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/tests:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/structtag:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/shift:go_tool_library",
# "@org_golang_x_tools//go/analysis/passes/shadow:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/printf:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/nilness:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/copylock:go_tool_library",
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/buildtag:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/bools:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/atomic:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/assign:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
"//tools/analyzers/maligned:go_tool_library",
"//tools/analyzers/cryptorand:go_tool_library",
"//tools/analyzers/errcheck:go_tool_library",
"//tools/analyzers/featureconfig:go_tool_library",
"//tools/analyzers/comparesame:go_tool_library",
"//tools/analyzers/shadowpredecl:go_tool_library",
"//tools/analyzers/nop:go_tool_library",
"//tools/analyzers/slicedirect:go_tool_library",
"//tools/analyzers/interfacechecker:go_tool_library",
"//tools/analyzers/ineffassign:go_tool_library",
"//tools/analyzers/properpermissions:go_tool_library",
] + select({
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],
"//conditions:default": [
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_tool_library",
],
}),
)
@@ -150,6 +144,15 @@ common_files = {
"//:README.md": "README.md",
}
string_setting(
name = "gotags",
build_setting_default = "",
values = [
"",
"libfuzzer",
],
)
sh_binary(
name = "prysm_sh",
srcs = ["prysm.sh"],


@@ -190,7 +190,7 @@ Anyone can become a part-time contributor and help out on implementing Ethereum
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
- Stay up to date on the latest beacon chain specification
- Stay up to date on the latest beacon chain sepcification
- Monitor github issues and PRs to make sure owner, labels, descriptions are correct
- Formulate independent ideas, suggest new work to do, point out improvements to existing approaches
- Participate in code review, ensure code quality is excellent, and have ensure high code coverage


@@ -1,4 +1,4 @@
# Dependency Management in Prysm
# Dependency Managagement in Prysm
Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
pb.go files at build time when file changes are present in any protobuf definition file or after
any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
the following scripts often to ensure their generated files are up to date. Further more, Prysm
generates SSZ marshal related code based on defined data structures. These generated files must
also be updated and checked in as frequently.


@@ -26,18 +26,15 @@ You can use `bazel run //tools/genesis-state-gen` to create a deterministic gene
### Usage
- **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
- **--num-validators** int: Number of validators to deterministically include in the generated genesis state
- **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state
- **--config-name=interop** string: name of the beacon chain config to use when generating the state. ex mainnet|minimal|interop
**deprecated flag: use --config-name instead**
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section. When using the `--interop-*` flags, the beacon node will assume the `interop` config should be used, unless a different config is specified on the command line.
and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section.
```
bazel run //tools/genesis-state-gen -- --config-name interop --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
bazel run //tools/genesis-state-gen -- --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
```
## Launching a Beacon Node + Validator Client
@@ -49,10 +46,8 @@ Open up two terminal windows, run:
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--min-sync-peers=0 \
--interop-num-validators 64 \
--interop-eth1data-votes
```
@@ -74,10 +69,8 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--min-sync-peers=0 \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```


@@ -2,10 +2,8 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![ETH2.0_Spec_Version 1.0.0](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v1.0.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.0.0)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

160
WORKSPACE

@@ -4,23 +4,20 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
http_archive(
name = "rules_pkg",
sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
name = "bazel_toolchains",
sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
strip_prefix = "bazel-toolchains-3.7.0",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
],
)
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
sha256 = "040b9d00b8a03e8a28e38159ad0f2d0e0de625d93f453a9f226971a8c47e757b",
strip_prefix = "bazel-toolchain-5f82830f9d6a1941c3eb29683c1864ccf2862454",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/5f82830f9d6a1941c3eb29683c1864ccf2862454.tar.gz"],
)
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
@@ -31,7 +28,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
llvm_version = "10.0.0",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
@@ -42,6 +39,10 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool
configure_prysm_toolchains()
load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
rbe_toolchains_config()
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
@@ -59,10 +60,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
sha256 = "5982e5463f171da99e3bdaeff8c0f48283a7a5f396ec5282910b9e8a49c0dd7e",
sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
],
)
@@ -75,21 +76,25 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
strip_prefix = "rules_docker-0.19.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
)
http_archive(
name = "io_bazel_rules_go",
patch_args = ["-p1"],
patches = [
# Required until https://github.com/bazelbuild/rules_go/pull/2450 merges otherwise nilness
# nogo check fails for certain third_party dependencies.
"//third_party:io_bazel_rules_go.patch",
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
sha256 = "7c10271940c6bce577d51a075ae77728964db285dac0a46614a7934dc34303e6",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
],
)
@@ -108,6 +113,20 @@ git_repository(
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
http_archive(
name = "fuzzit_linux",
build_file_content = "exports_files([\"fuzzit\"])",
sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1569509514 +0300",
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -120,36 +139,32 @@ load(
"container_pull",
)
# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
container_pull(
name = "cc_image_base_amd64",
digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
name = "cc_image_base",
digest = "sha256:2c4bb6b7236db0a55ec54ba8845e4031f5db2be957ac61867872bf42e56c4deb",
registry = "gcr.io",
repository = "distroless/cc-debian11",
repository = "distroless/cc",
)
# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
container_pull(
name = "cc_debug_image_base_amd64",
digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
name = "cc_debug_image_base",
digest = "sha256:3680c61e81f68fc00bfb5e1ec65e8e678aaafa7c5f056bc2681c29527ebbb30c",
registry = "gcr.io",
repository = "distroless/cc-debian11",
repository = "distroless/cc",
)
# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
container_pull(
name = "go_image_base_amd64",
digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
name = "go_image_base",
digest = "sha256:ba7a315f86771332e76fa9c3d423ecfdbb8265879c6f1c264d6fff7d4fa460a4",
registry = "gcr.io",
repository = "distroless/base-debian11",
repository = "distroless/base",
)
# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
container_pull(
name = "go_debug_image_base_amd64",
digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
name = "go_debug_image_base",
digest = "sha256:efd8711717d9e9b5d0dbb20ea10876dab0609c923bc05321b912f9239090ca80",
registry = "gcr.io",
repository = "distroless/base-debian11",
repository = "distroless/base",
)
container_pull(
@@ -159,15 +174,35 @@ container_pull(
repository = "pinglamb/alpine-glibc",
)
container_pull(
name = "fuzzit_base",
digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
registry = "gcr.io",
repository = "fuzzit-public/stretch-llvm8",
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.3",
go_version = "1.16.4",
nogo = "@//:nogo",
)
http_archive(
name = "prysm_testnet_site",
build_file_content = """
proto_library(
name = "faucet_proto",
srcs = ["src/proto/faucet.proto"],
visibility = ["//visibility:public"],
)""",
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -187,25 +222,10 @@ filegroup(
)
""",
sha256 = "91434d5fd5e1c6eb7b0174fed2afe25e09bddf00e1e4c431db931b2cee4e7773",
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
http_archive(
name = "eip4881_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "89cb659498c0d196fc9f957f8b849b2e1a5c041c3b2b3ae5432ac5c26944297e",
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.1.3"
bls_test_version = "v0.1.1"
@@ -221,7 +241,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "e572f8c57e2dbbaeee056a600dc9d08396010dd5134a3a95e43c540470acf6f5",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +257,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "7e2f62eaae9fd541690cc61d252556d0c5deb585ca1873aacbeb5b02d06f1362",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +273,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "05cbb89810c8acd6c57c4773ddfd167305cd4539960e9b4d7b69e1a988b35ad2",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +288,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "0cef67b08448f7eb43bf66c464451c9e7a4852df8ef90555cca6d440e3436882",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -299,9 +319,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
sha256 = "9dc47bf6b14aed7fac8833e35ab83a69131b43fa5789b3256bf1ac3d4861aeb8",
strip_prefix = "eth2-networks-7fa1b868985ee24aad65567f9250cf7fa86f97b1",
url = "https://github.com/eth2-clients/eth2-networks/archive/7fa1b868985ee24aad65567f9250cf7fa86f97b1.tar.gz",
)
http_archive(
@@ -321,6 +341,16 @@ git_repository(
# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
http_archive(
name = "sigp_beacon_fuzz_corpora",
build_file = "//third_party:beacon-fuzz/corpora.BUILD",
sha256 = "42993d0901a316afda45b4ba6d53c7c21f30c551dcec290a4ca131c24453d1ef",
strip_prefix = "beacon-fuzz-corpora-bac24ad78d45cc3664c0172241feac969c1ac29b",
urls = [
"https://github.com/sigp/beacon-fuzz-corpora/archive/bac24ad78d45cc3664c0172241feac969c1ac29b.tar.gz",
],
)
# External dependencies
http_archive(
@@ -332,9 +362,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
],
)
@@ -347,10 +377,6 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")
e2e_deps()
load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",


@@ -1,56 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_mod//semver:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"checkpoint_test.go",
"client_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)


@@ -1,263 +0,0 @@
package beacon
import (
"context"
"fmt"
"path"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.ReadOnlySignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
return statePath, file.WriteFile(statePath, o.StateBytes())
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (o *OriginData) StateBytes() []byte {
return o.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
return o.bb
}
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}
// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
// This pair can be used to initialize a new beacon node via checkpoint sync.
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
sb, err := client.GetState(ctx, IdFinalized)
if err != nil {
return nil, err
}
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
return &OriginData{
st: s,
b: b,
sb: sb,
bb: bb,
vu: vu,
br: br,
sr: sr,
}, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch primitives.Epoch
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}
// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return computeBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
return ws, nil
}
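// Example (illustrative sketch): deriving the value for a weak subjectivity checkpoint
// flag from an existing client; the helper name is hypothetical.
func exampleCheckpointFlag(ctx context.Context, client *Client) (string, error) {
    wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, client)
    if err != nil {
        return "", err
    }
    // CheckpointString renders "<block_root>:<epoch>", suitable for checkpoint sync configuration.
    return wsd.CheckpointString(), nil
}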
const (
prysmMinimumVersion = "v2.0.7"
prysmImplementationName = "Prysm"
)
// errUnsupportedPrysmCheckpointVersion indicates the remote beacon node cannot be used for checkpoint retrieval.
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
nv, err := client.GetNodeVersion(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
}
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
}
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
}
// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
sb, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
// compute state and block roots
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}
h := s.LatestBlockHeader()
h.StateRoot = sr[:]
br, err := h.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %d", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err = b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}
return &WeakSubjectivityData{
Epoch: epoch,
BlockRoot: br,
StateRoot: sr,
}, nil
}
// getWeakSubjectivityEpochFromHead downloads the head state, detects the correct chain config from it,
// and uses prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
return 0, err
}
vu, err := detect.FromState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
headState, err := vu.UnmarshalBeaconState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}


@@ -1,499 +0,0 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
type testRT struct {
rt func(*http.Request) (*http.Response, error)
}
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}
var _ http.RoundTripper = &testRT{}
func marshalToEnvelope(val interface{}) ([]byte, error) {
raw, err := json.Marshal(val)
if err != nil {
return nil, errors.Wrap(err, "error marshaling value to place in data envelope")
}
env := struct {
Data json.RawMessage `json:"data"`
}{
Data: raw,
}
return json.Marshal(env)
}
func TestMarshalToEnvelope(t *testing.T) {
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
expected := `{"data":{"version":"Prysm/v2.0.5 (linux amd64)"}}`
require.Equal(t, expected, string(encoded))
}
func TestFallbackVersionCheck(t *testing.T) {
c := &Client{
hc: &http.Client{},
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
ctx := context.Background()
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
func TestFname(t *testing.T) {
vu := &detect.VersionedUnmarshaler{
Config: params.MainnetConfig(),
Fork: version.Phase0,
}
slot := primitives.Slot(23)
prefix := "block"
var root [32]byte
copy(root[:], []byte{0x23, 0x23, 0x23})
expected := "block_mainnet_phase0_23-0x2323230000000000000000000000000000000000000000000000000000000000.ssz"
actual := fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
vu.Config = params.MinimalSpecConfig()
vu.Fork = version.Altair
slot = 17
prefix = "state"
copy(root[29:], []byte{0x17, 0x17, 0x17})
expected = "state_minimal_altair_17-0x2323230000000000000000000000000000000000000000000000000000171717.ssz"
actual = fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
}
func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(epoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, wSlot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, wRoot)
require.NoError(t, err)
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
expectedWSD := WeakSubjectivityData{
BlockRoot: bRoot,
StateRoot: wRoot,
Epoch: epoch,
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
require.Equal(t, expectedWSD.Epoch, wsd.Epoch)
require.Equal(t, expectedWSD.StateRoot, wsd.StateRoot)
require.Equal(t, expectedWSD.BlockRoot, wsd.BlockRoot)
}
// runs computeBackwardsCompatible directly
// and via ComputeWeakSubjectivityCheckpoint with a round tripper that triggers the backwards compatible code path
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
st, expectedEpoch := defaultTestHeadState(t, cfg)
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(expectedEpoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, wSlot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, wRoot)
require.NoError(t, err)
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
wsPriv, err := computeBackwardsCompatible(ctx, c)
require.NoError(t, err)
require.DeepEqual(t, wsPriv, wsPub)
}
func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
}
func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
os := forks.NewOrderedSchedule(cfg)
currentVersion, err := os.VersionForEpoch(epoch)
if err != nil {
return nil, err
}
prevVersion, err := os.Previous(currentVersion)
if err != nil {
if !errors.Is(err, forks.ErrNoPreviousVersion) {
return nil, err
}
// use same version for both in the case of genesis
prevVersion = currentVersion
}
forkEpoch := cfg.ForkVersionSchedule[currentVersion]
return &ethpb.Fork{
PreviousVersion: prevVersion[:],
CurrentVersion: currentVersion[:],
Epoch: forkEpoch,
}, nil
}
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
slot, err := slots.EpochStart(cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetSlot(slot))
var validatorCount, avgBalance uint64 = 100, 35
require.NoError(t, populateValidators(cfg, st, validatorCount, avgBalance))
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: fork.Epoch - 10,
Root: make([]byte, 32),
}))
// to see the math for this, look at helpers.LatestWeakSubjectivityEpoch;
// the expected value below assumes mainnet config values, the validatorCount and avgBalance above, and the altair fork epoch
expectedEpoch := slots.ToEpoch(st.Slot()) - 224
return st, expectedEpoch
}
// TODO(10429): refactor beacon state options in testing/util to take a state.BeaconState so this can become an option
func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, valCount, avgBalance uint64) error {
validators := make([]*ethpb.Validator, valCount)
balances := make([]uint64, len(validators))
for i := uint64(0); i < valCount; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, cfg.BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: avgBalance * 1e9,
ExitEpoch: cfg.FarFutureEpoch,
}
balances[i] = validators[i].EffectiveBalance
}
if err := st.SetValidators(validators); err != nil {
return err
}
return st.SetBalances(balances)
}
func TestDownloadFinalizedData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()
// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
slot, err := slots.EpochStart(epoch)
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, slot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, st.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
sr, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, sr)
require.NoError(t, err)
mb, err := b.MarshalSSZ()
require.NoError(t, err)
br, err := b.Block().HashTreeRoot()
require.NoError(t, err)
ms, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(sb, ms))
vu, err := detect.FromState(sb)
require.NoError(t, err)
us, err := vu.UnmarshalBeaconState(sb)
require.NoError(t, err)
ushtr, err := us.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, sr, ushtr)
expected := &OriginData{
sb: ms,
bb: mb,
br: br,
sr: sr,
}
od, err := DownloadFinalizedData(ctx, c)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
require.Equal(t, expected.br, od.br)
require.Equal(t, expected.sr, od.sr)
}


@@ -1,491 +0,0 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getConfigSpecPath = "/eth/v1/config/spec"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
)
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
// StateOrBlockId constants are defined for named identifiers, and helper methods are provided
// for slot and root identifiers. Example text from the Eth Beacon Node API documentation:
//
// "Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>."
type StateOrBlockId string
const (
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdFinalized StateOrBlockId = "finalized"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
return StateOrBlockId(fmt.Sprintf("%#x", r))
}
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromSlot(s primitives.Slot) StateOrBlockId {
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
}
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
func idTemplate(ts string) func(StateOrBlockId) string {
t := template.Must(template.New("").Parse(ts))
f := func(id StateOrBlockId) string {
b := bytes.NewBuffer(nil)
err := t.Execute(b, struct{ Id string }{Id: string(id)})
if err != nil {
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
}
return b.String()
}
// run the template once to ensure that it is valid
// this should happen at load time (via package-scoped vars) so that runtime errors aren't possible
_ = f(IdGenesis)
return f
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
baseURL *url.URL
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
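// Example (illustrative sketch): constructing a client with a timeout and fetching the
// canonical head block root; the host and timeout are placeholder values.
func exampleHeadRoot(ctx context.Context) ([32]byte, error) {
    c, err := NewClient("http://localhost:3500", WithTimeout(30*time.Second))
    if err != nil {
        return [32]byte{}, err
    }
    return c.GetBlockRoot(ctx, IdHead)
}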
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.get(ctx, blockPath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
return b, nil
}
var getBlockRootTpl = idTemplate(getBlockRootPath)
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
jsonr := &struct{ Data struct{ Root string } }{}
err = json.Unmarshal(b, jsonr)
if err != nil {
return [32]byte{}, errors.Wrap(err, "error decoding json data from get block root response")
}
rs, err := hexutil.Decode(jsonr.Data.Root)
if err != nil {
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error decoding hex-encoded value %s", jsonr.Data.Root))
}
return bytesutil.ToBytes32(rs), nil
}
var getForkTpl = idTemplate(getForkForStatePath)
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
fr := &forkResponse{}
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
err = json.Unmarshal(body, dataWrapper)
if err != nil {
return nil, errors.Wrap(err, "error decoding json response in GetFork")
}
return fr.Fork()
}
// GetForkSchedule retrieves all forks, past, present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieves the current config of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
fsr := &v1.SpecResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
return fsr, nil
}
type NodeVersion struct {
implementation string
semver string
systemInfo string
}
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
semver: groups[2],
systemInfo: groups[3],
}, nil
}
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to an HTTP User-Agent field, e.g. Lighthouse/v0.1.5 (Linux x86_64).
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
d := struct {
Data struct {
Version string `json:"version"`
} `json:"data"`
}{}
err = json.Unmarshal(b, &d)
if err != nil {
return nil, errors.Wrapf(err, "error unmarshaling response body: %s", string(b))
}
return parseNodeVersion(d.Data.Version)
}
func renderGetStatePath(id StateOrBlockId) string {
return path.Join(getStatePath, string(id))
}
// GetState retrieves the BeaconState for the given state id.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.get(ctx, statePath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
return b, nil
}
// GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm.
// This api method does the following:
// - computes the weak subjectivity epoch
// - finds the highest non-skipped block preceding that epoch
// - returns the hash_tree_root of the found block along with the state_root from that block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
v := &apimiddleware.WeakSubjectivityResponse{}
err = json.Unmarshal(body, v)
if err != nil {
return nil, err
}
epoch, err := strconv.ParseUint(v.Data.Checkpoint.Epoch, 10, 64)
if err != nil {
return nil, err
}
blockRoot, err := hexutil.Decode(v.Data.Checkpoint.Root)
if err != nil {
return nil, err
}
stateRoot, err := hexutil.Decode(v.Data.StateRoot)
if err != nil {
return nil, err
}
return &WeakSubjectivityData{
Epoch: primitives.Epoch(epoch),
BlockRoot: bytesutil.ToBytes32(blockRoot),
StateRoot: bytesutil.ToBytes32(stateRoot),
}, nil
}
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK, the failure messages associated with the corresponding request messages are logged and an error is returned.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(body))
if err != nil {
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
if err != nil {
return err
}
defer func() {
err = resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
decoder := json.NewDecoder(resp.Body)
decoder.DisallowUnknownFields()
errorJson := &apimiddleware.IndexedVerificationFailureErrorJson{}
if err := decoder.Decode(errorJson); err != nil {
return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
}
return nil
}
// GetBLStoExecutionChanges gets all the BLS-to-execution-change (withdrawal address) messages in the node's operations pool.
// Returns a struct representation of the json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err
}
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
Epoch string `json:"epoch"`
}
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
if err != nil {
return nil, err
}
cSlice, err := hexutil.Decode(f.CurrentVersion)
if err != nil {
return nil, err
}
if len(cSlice) != 4 {
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
}
pSlice, err := hexutil.Decode(f.PreviousVersion)
if err != nil {
return nil, err
}
if len(pSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
}
return &ethpb.Fork{
CurrentVersion: cSlice,
PreviousVersion: pSlice,
Epoch: primitives.Epoch(epoch),
}, nil
}
type forkScheduleResponse struct {
Data []forkResponse
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.Atoi(d.Epoch)
if err != nil {
return nil, err
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
})
}
sort.Sort(ofs)
return ofs, nil
}
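// Example (illustrative sketch): using GetForkSchedule to look up the fork version active
// at a given epoch; VersionForEpoch comes from the forks package used above.
func exampleForkVersion(ctx context.Context, c *Client, epoch primitives.Epoch) ([4]byte, error) {
    sched, err := c.GetForkSchedule(ctx)
    if err != nil {
        return [4]byte{}, err
    }
    return sched.VersionForEpoch(epoch)
}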


@@ -1,138 +0,0 @@
package beacon
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestParseNodeVersion(t *testing.T) {
cases := []struct {
name string
v string
err error
nv *NodeVersion
}{
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
},
{
name: "complete version",
v: "Prysm/v2.0.6 (linux amd64)",
nv: &NodeVersion{
implementation: "Prysm",
semver: "v2.0.6",
systemInfo: "(linux amd64)",
},
},
{
name: "nimbus version",
v: "Nimbus/v22.4.0-039bec-stateofus",
nv: &NodeVersion{
implementation: "Nimbus",
semver: "v22.4.0-039bec-stateofus",
systemInfo: "",
},
},
{
name: "teku version",
v: "teku/v22.3.2/linux-x86_64/oracle-java-11",
nv: &NodeVersion{
implementation: "teku",
semver: "v22.3.2",
systemInfo: "linux-x86_64/oracle-java-11",
},
},
{
name: "lighthouse version",
v: "Lighthouse/v2.1.1-5f628a7/x86_64-linux",
nv: &NodeVersion{
implementation: "Lighthouse",
semver: "v2.1.1-5f628a7",
systemInfo: "x86_64-linux",
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
nv, err := parseNodeVersion(c.v)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
require.NoError(t, err)
require.DeepEqual(t, c.nv, nv)
}
})
}
}
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
{
name: "hostname with port",
hostArg: "mydomain.org:3500",
path: getNodeVersionPath,
joined: "http://mydomain.org:3500/eth/v1/node/version",
},
{
name: "https scheme, hostname with port",
hostArg: "https://mydomain.org:3500",
path: getNodeVersionPath,
joined: "https://mydomain.org:3500/eth/v1/node/version",
},
{
name: "http scheme, hostname without port",
hostArg: "http://mydomain.org",
path: getNodeVersionPath,
joined: "http://mydomain.org/eth/v1/node/version",
},
{
name: "http scheme, trailing slash, hostname without port",
hostArg: "http://mydomain.org/",
path: getNodeVersionPath,
joined: "http://mydomain.org/eth/v1/node/version",
},
{
name: "http scheme, hostname with basic auth creds and no port",
hostArg: "http://username:pass@mydomain.org/",
path: getNodeVersionPath,
joined: "http://username:pass@mydomain.org/eth/v1/node/version",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
})
}
}


@@ -1,5 +0,0 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
*/
package beacon


@@ -1,13 +0,0 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")


@@ -1,55 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"bid.go",
"client.go",
"errors.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"client_test.go",
"types_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)


@@ -1,201 +0,0 @@
package builder
import (
"math/big"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
// SignedBid is an interface describing the method set of a signed builder bid.
type SignedBid interface {
Message() (Bid, error)
Signature() []byte
Version() int
IsNil() bool
}
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
Value() []byte
Pubkey() []byte
Version() int
IsNil() bool
HashTreeRoot() ([32]byte, error)
HashTreeRootWith(hh *ssz.Hasher) error
}
type signedBuilderBid struct {
p *ethpb.SignedBuilderBid
}
// WrappedSignedBuilderBid is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
w := signedBuilderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBid) Message() (Bid, error) {
return WrappedBuilderBid(b.p.Message)
}
// Signature --
func (b signedBuilderBid) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBid) Version() int {
return version.Bellatrix
}
// IsNil --
func (b signedBuilderBid) IsNil() bool {
return b.p == nil
}
type signedBuilderBidCapella struct {
p *ethpb.SignedBuilderBidCapella
}
// WrappedSignedBuilderBidCapella is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
w := signedBuilderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidCapella) Message() (Bid, error) {
return WrappedBuilderBidCapella(b.p.Message)
}
// Signature --
func (b signedBuilderBidCapella) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidCapella) Version() int {
return version.Capella
}
// IsNil --
func (b signedBuilderBidCapella) IsNil() bool {
return b.p == nil
}
type builderBid struct {
p *ethpb.BuilderBid
}
// WrappedBuilderBid is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
w := builderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header --
func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
}
// Value --
func (b builderBid) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBid) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBid) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBid) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBid) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidCapella struct {
p *ethpb.BuilderBidCapella
}
// WrappedBuilderBidCapella is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
w := builderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// The bid value from the builder is a little-endian uint256, so reverse it into the big-endian form expected by big.Int.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
}
// Version --
func (b builderBidCapella) Version() int {
return version.Capella
}
// Value --
func (b builderBidCapella) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidCapella) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidCapella) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
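// Example (illustrative sketch): wrapping a protobuf Capella bid received from a builder
// and reading its execution payload header; the input is assumed to have been decoded elsewhere.
func exampleInspectBid(p *ethpb.SignedBuilderBidCapella) (interfaces.ExecutionData, error) {
    sb, err := WrappedSignedBuilderBidCapella(p)
    if err != nil {
        return nil, err
    }
    bid, err := sb.Message()
    if err != nil {
        return nil, err
    }
    // Header converts the bid value to Gwei and wraps the payload header.
    return bid.Header()
}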


@@ -1,408 +0,0 @@
package builder
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const (
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
getStatus = "/eth/v1/builder/status"
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
postRegisterValidatorPath = "/eth/v1/builder/validators"
)
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
type observer interface {
observe(r *http.Request) error
}
// WithObserver configures the client to pass each outgoing request to the given observer before it is sent.
func WithObserver(m observer) ClientOpt {
return func(c *Client) {
c.obvs = append(c.obvs, m)
}
}
type requestLogger struct{}
func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(log.Fields{
"body-base64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
t := io.TeeReader(r.Body, b)
defer func() {
if r.Body != nil {
e = r.Body.Close()
}
}()
body, err := io.ReadAll(t)
if err != nil {
return err
}
r.Body = io.NopCloser(b)
log.WithFields(log.Fields{
"body-base64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
var _ observer = &requestLogger{}
// BuilderClient provides a collection of helper methods for calling Builder API endpoints.
type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
Status(ctx context.Context) error
}
// Client provides a collection of helper methods for calling Builder API endpoints.
type Client struct {
hc *http.Client
baseURL *url.URL
obvs []observer
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
endpoint := covertEndPoint(host)
u, err := urlForHost(endpoint.Url)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, errMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the builder node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
span.End()
}()
u := c.baseURL.ResolveReference(&url.URL{Path: path})
span.AddAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return
}
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
}
for _, o := range c.obvs {
if err = o.observe(req); err != nil {
return
}
}
r, err := c.hc.Do(req)
if err != nil {
return
}
defer func() {
closeErr := r.Body.Close()
if closeErr != nil {
log.WithError(closeErr).Error("Failed to close response body")
}
}()
if r.StatusCode != http.StatusOK {
err = non200Err(r)
return
}
res, err = io.ReadAll(r.Body)
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return
}
return
}
var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
func execHeaderPath(slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
v := struct {
Slot primitives.Slot
ParentHash string
Pubkey string
}{
Slot: slot,
ParentHash: fmt.Sprintf("%#x", parentHash),
Pubkey: fmt.Sprintf("%#x", pubkey),
}
b := bytes.NewBuffer(nil)
err := execHeaderTemplate.Execute(b, v)
if err != nil {
return "", errors.Wrapf(err, "error rendering exec header template with slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
return b.String(), nil
}
// GetHeader is used by a proposing validator to request an execution payload header from the Builder node.
func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error) {
path, err := execHeaderPath(slot, parentHash, pubkey)
if err != nil {
return nil, err
}
hb, err := c.do(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
}
v := &VersionResponse{}
if err := json.Unmarshal(hb, v); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidCapella(p)
case strings.ToLower(version.String(version.Bellatrix)):
hr := &ExecHeaderResponse{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrap(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBid(p)
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
if len(svr) == 0 {
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
tracing.AnnotateError(span, err)
return err
}
vs := make([]*SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
}
body, err := json.Marshal(vs)
if err != nil {
err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
tracing.AnnotateError(span, err)
return err
}
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
return err
}
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if !sb.IsBlinded() {
return nil, errNotBlinded
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayload(p)
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, 0)
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}
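// A minimal caller sketch (submitAndInspectSketch is a hypothetical helper): submit a signed blinded
// block and surface two fields of the returned payload that the tests in this package also assert on.
func submitAndInspectSketch(ctx context.Context, c *Client, sb interfaces.ReadOnlySignedBeaconBlock) ([]byte, uint64, error) {
payload, err := c.SubmitBlindedBlock(ctx, sb)
if err != nil {
return nil, 0, err
}
// The unblinded payload carries the execution data needed to complete the proposed block.
return payload.ParentHash(), payload.GasLimit(), nil
}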
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
// response, and an error response may have an error message. This method will return a nil value for error in the
// happy path, and an error with information about the server response body for a non-200 response.
func (c *Client) Status(ctx context.Context) error {
_, err := c.do(ctx, http.MethodGet, getStatus, nil)
return err
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var errMessage ErrorMessage
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 204:
log.WithError(ErrNoContent).Debug(msg)
return ErrNoContent
case 400:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrBadRequest).Debug(msg)
return errors.Wrap(ErrBadRequest, errMessage.Message)
case 404:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotFound).Debug(msg)
return errors.Wrap(ErrNotFound, errMessage.Message)
case 500:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, errMessage.Message)
default:
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
}
}
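// Because non200Err maps status codes onto the sentinel errors in errors.go, callers can branch with
// errors.Is. A rough sketch of that pattern (classifyStatusErrSketch is a hypothetical helper, assuming
// the errors package in use forwards Is to the standard library, which the ErrorIs assertions in this
// package's tests also rely on):
func classifyStatusErrSketch(ctx context.Context, c *Client) string {
err := c.Status(ctx)
switch {
case err == nil:
return "healthy"
case errors.Is(err, ErrNoContent):
return "204 no content"
case errors.Is(err, ErrBadRequest):
return "400 bad request"
case errors.Is(err, ErrNotFound):
return "404 not found"
case errors.Is(err, ErrNotOK):
return "other non-2xx"
default:
return "transport or decoding error"
}
}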
func covertEndPoint(ep string) network.Endpoint {
return network.Endpoint{
Url: ep,
Auth: network.AuthorizationData{ // Auth is not used for builder.
Method: authorization.None,
Value: "",
}}
}

View File

@@ -1,648 +0,0 @@
package builder
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"net/url"
"strconv"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
type roundtrip func(*http.Request) (*http.Response, error)
func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
return fn(r)
}
func TestClient_Status(t *testing.T) {
ctx := context.Background()
statusPath := "/eth/v1/builder/status"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
defer func() {
if r.Body == nil {
return
}
require.NoError(t, r.Body.Close())
}()
require.Equal(t, statusPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBuffer(nil)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
require.NoError(t, c.Status(ctx))
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
defer func() {
if r.Body == nil {
return
}
require.NoError(t, r.Body.Close())
}()
require.Equal(t, statusPath, r.URL.Path)
message := ErrorMessage{
Code: 500,
Message: "Internal server error",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusInternalServerError,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c = &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
require.ErrorIs(t, c.Status(ctx), ErrNotOK)
}
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
body, err := io.ReadAll(r.Body)
defer func() {
require.NoError(t, r.Body.Close())
}()
require.NoError(t, err)
require.Equal(t, expectedBody, string(body))
require.Equal(t, expectedPath, r.URL.Path)
require.Equal(t, http.MethodPost, r.Method)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBuffer(nil)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
reg := &eth.SignedValidatorRegistrationV1{
Message: &eth.ValidatorRegistrationV1{
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
GasLimit: 23,
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}
func TestClient_GetHeader(t *testing.T) {
ctx := context.Background()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
message := ErrorMessage{
Code: 500,
Message: "Internal server error",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusInternalServerError,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNotOK)
})
t.Run("header not available", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusNoContent,
Body: io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNoContent)
})
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature()))
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
txRoot, err := bidHeader.TransactionsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedTxRoot, txRoot))
require.Equal(t, uint64(1), bidHeader.GasUsed())
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseCapella)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseUnknownVersion)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "unsupported header version", err)
})
}
func TestSubmitBlindedBlock(t *testing.T) {
ctx := context.Background()
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas()))
require.Equal(t, uint64(1), ep.GasLimit())
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
require.NoError(t, err)
ep, err := c.SubmitBlindedBlock(ctx, sbb)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
require.ErrorIs(t, err, errNotBlinded)
})
}
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
return &eth.SignedBlindedBeaconBlockBellatrix{
Block: &eth.BlindedBeaconBlockBellatrix{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyBellatrix{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
}
func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconBlockCapella {
return &eth.SignedBlindedBeaconBlockCapella{
Block: &eth.BlindedBeaconBlockCapella{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyCapella{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderCapella{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)
require.NoError(t, err)
ctx := context.Background()
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, getStatus, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
Request: r.Clone(ctx),
}, nil
}),
}
c.hc = hc
err = c.Status(ctx)
require.NoError(t, err)
}

View File

@@ -1,17 +0,0 @@
package builder
import "github.com/pkg/errors"
// ErrNotOK is used to indicate that an HTTP request to the builder API failed with a non-2xx response code.
// More specific errors may be returned, but an error produced for a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 200 response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrBadRequest specifically means that a '400 - BAD REQUEST' response was received from the API.
var ErrBadRequest = errors.Wrap(ErrNotOK, "recv 400 BadRequest response from API")
// ErrNoContent specifically means that a '204 - No Content' response was received from the API.
// A 204 is normally a success, but for the builder header API it means that no header is available.
var ErrNoContent = errors.New("recv 204 no content response from API, No header is available")

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}

View File

@@ -1 +0,0 @@
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}

View File

@@ -1,15 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -1,50 +0,0 @@
package testing
import (
"context"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// MockClient is a mock implementation of BuilderClient.
type MockClient struct {
RegisteredVals map[[48]byte]bool
}
// NewClient creates a new, correctly initialized mock.
func NewClient() MockClient {
return MockClient{RegisteredVals: map[[48]byte]bool{}}
}
// NodeURL --
func (MockClient) NodeURL() string {
return ""
}
// GetHeader --
func (MockClient) GetHeader(_ context.Context, _ primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
return nil, nil
}
// RegisterValidator --
func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
for _, r := range svr {
b := bytesutil.ToBytes48(r.Message.Pubkey)
m.RegisteredVals[b] = true
}
return nil
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
}
// Status --
func (MockClient) Status(_ context.Context) error {
return nil
}
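// A small illustrative sketch (registeredThroughMockSketch is a hypothetical helper and the pubkey is a
// zero-valued placeholder): the mock records registrations, so a test can assert that the component under
// test actually called RegisterValidator.
func registeredThroughMockSketch() bool {
m := NewClient()
reg := &ethpb.SignedValidatorRegistrationV1{
Message: &ethpb.ValidatorRegistrationV1{Pubkey: make([]byte, 48)},
}
if err := m.RegisterValidator(context.Background(), []*ethpb.SignedValidatorRegistrationV1{reg}); err != nil {
return false
}
return m.RegisteredVals[bytesutil.ToBytes48(reg.Message.Pubkey)]
}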

View File

@@ -1,963 +0,0 @@
package builder
import (
"encoding/json"
"fmt"
"math/big"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
type SignedValidatorRegistration struct {
*eth.SignedValidatorRegistrationV1
}
type ValidatorRegistration struct {
*eth.ValidatorRegistrationV1
}
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &ValidatorRegistration{r.Message},
Signature: r.SignedValidatorRegistrationV1.Signature,
})
}
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}
r.Message = o.Message.ValidatorRegistrationV1
r.Signature = o.Signature
return nil
}
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
}{
FeeRecipient: r.FeeRecipient,
GasLimit: fmt.Sprintf("%d", r.GasLimit),
Timestamp: fmt.Sprintf("%d", r.Timestamp),
Pubkey: r.Pubkey,
})
}
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}
r.FeeRecipient = o.FeeRecipient
r.Pubkey = o.Pubkey
var err error
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse gas limit")
}
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse timestamp")
}
return nil
}
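// A round-trip sketch (registrationJSONRoundTripSketch is a hypothetical helper): the MarshalJSON and
// UnmarshalJSON pair above moves a registration through the hex- and decimal-string wire form the builder
// API expects and back into the protobuf type.
func registrationJSONRoundTripSketch(reg *eth.SignedValidatorRegistrationV1) (*eth.SignedValidatorRegistrationV1, error) {
b, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: reg})
if err != nil {
return nil, err
}
out := &SignedValidatorRegistration{}
if err := json.Unmarshal(b, out); err != nil {
return nil, err
}
return out.SignedValidatorRegistrationV1, nil
}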
var errInvalidUint256 = errors.New("invalid Uint256")
var errDecodeUint256 = errors.New("unable to decode into Uint256")
type Uint256 struct {
*big.Int
}
func isValidUint256(bi *big.Int) bool {
return bi.Cmp(big.NewInt(0)) >= 0 && bi.BitLen() <= 256
}
func stringToUint256(s string) (Uint256, error) {
bi := new(big.Int)
_, ok := bi.SetString(s, 10)
if !ok || !isValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", s)
}
return Uint256{Int: bi}, nil
}
// sszBytesToUint256 creates a Uint256 from an ssz-style (little-endian byte slice) representation.
func sszBytesToUint256(b []byte) (Uint256, error) {
bi := bytesutil.LittleEndianBytesToBigInt(b)
if !isValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", b)
}
return Uint256{Int: bi}, nil
}
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
if !isValidUint256(s.Int) {
return []byte{}
}
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}
func (s *Uint256) UnmarshalJSON(t []byte) error {
start := 0
end := len(t)
if t[0] == '"' {
start += 1
}
if t[end-1] == '"' {
end -= 1
}
return s.UnmarshalText(t[start:end])
}
func (s *Uint256) UnmarshalText(t []byte) error {
if s.Int == nil {
s.Int = big.NewInt(0)
}
z, ok := s.SetString(string(t), 10)
if !ok {
return errors.Wrapf(errDecodeUint256, "value=%s", t)
}
if !isValidUint256(z) {
return errors.Wrapf(errDecodeUint256, "value=%s", t)
}
s.Int = z
return nil
}
func (s Uint256) MarshalJSON() ([]byte, error) {
t, err := s.MarshalText()
if err != nil {
return nil, err
}
t = append([]byte{'"'}, t...)
t = append(t, '"')
return t, nil
}
func (s Uint256) MarshalText() ([]byte, error) {
if !isValidUint256(s.Int) {
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
}
return []byte(s.String()), nil
}
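// A round-trip sketch (uint256RoundTripSketch is a hypothetical helper): a decimal string parsed with
// stringToUint256 can be serialized to its little-endian SSZ form with SSZBytes and recovered with
// sszBytesToUint256.
func uint256RoundTripSketch(dec string) (bool, error) {
u, err := stringToUint256(dec)
if err != nil {
return false, err
}
back, err := sszBytesToUint256(u.SSZBytes())
if err != nil {
return false, err
}
return u.Cmp(back.Int) == 0, nil
}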
type Uint64String uint64
func (s *Uint64String) UnmarshalText(t []byte) error {
u, err := strconv.ParseUint(string(t), 10, 64)
*s = Uint64String(u)
return err
}
func (s Uint64String) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%d", s)), nil
}
type VersionResponse struct {
Version string `json:"version"`
}
type ExecHeaderResponse struct {
Version string `json:"version"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBid `json:"message"`
} `json:"data"`
}
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBid{
Message: bb,
Signature: ehr.Data.Signature,
}, nil
}
func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
return &eth.BuilderBid{
Header: header,
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
}
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
return &v1.ExecutionPayloadHeader{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
}, nil
}
type BuilderBid struct {
Header *ExecutionPayloadHeader `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
type ExecutionPayloadHeader struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
*v1.ExecutionPayloadHeader
}
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeader
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeader.ParentHash,
FeeRecipient: h.ExecutionPayloadHeader.FeeRecipient,
StateRoot: h.ExecutionPayloadHeader.StateRoot,
ReceiptsRoot: h.ExecutionPayloadHeader.ReceiptsRoot,
LogsBloom: h.ExecutionPayloadHeader.LogsBloom,
PrevRandao: h.ExecutionPayloadHeader.PrevRandao,
BlockNumber: Uint64String(h.ExecutionPayloadHeader.BlockNumber),
GasLimit: Uint64String(h.ExecutionPayloadHeader.GasLimit),
GasUsed: Uint64String(h.ExecutionPayloadHeader.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeader.Timestamp),
ExtraData: h.ExecutionPayloadHeader.ExtraData,
BaseFeePerGas: baseFeePerGas,
BlockHash: h.ExecutionPayloadHeader.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeader.TransactionsRoot,
})
}
func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeader
uc := &UnmarshalCaller{}
if err := json.Unmarshal(b, uc); err != nil {
return err
}
ep := ExecutionPayloadHeader(*uc)
*h = ep
var err error
h.ExecutionPayloadHeader, err = h.ToProto()
return err
}
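// A decoding sketch (headerJSONToProtoSketch is a hypothetical helper): unmarshaling builder JSON into
// ExecutionPayloadHeader also fills the embedded v1.ExecutionPayloadHeader, because UnmarshalJSON above
// finishes by calling ToProto.
func headerJSONToProtoSketch(b []byte) (*v1.ExecutionPayloadHeader, error) {
h := &ExecutionPayloadHeader{}
if err := json.Unmarshal(b, h); err != nil {
return nil, err
}
return h.ExecutionPayloadHeader, nil
}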
type ExecPayloadResponse struct {
Version string `json:"version"`
Data ExecutionPayload `json:"data"`
}
type ExecutionPayload struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
}
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
return r.Data.ToProto()
}
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = p.Transactions[i]
}
return &v1.ExecutionPayload{
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
Transactions: txs,
}, nil
}
type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidCapella `json:"message"`
} `json:"data"`
}
func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidCapella{
Message: bb,
Signature: ehr.Data.Signature,
}, nil
}
func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
return &eth.BuilderBidCapella{
Header: header,
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
}
func (h *ExecutionPayloadHeaderCapella) ToProto() (*v1.ExecutionPayloadHeaderCapella, error) {
return &v1.ExecutionPayloadHeaderCapella{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
WithdrawalsRoot: h.WithdrawalsRoot,
}, nil
}
type BuilderBidCapella struct {
Header *ExecutionPayloadHeaderCapella `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
WithdrawalsRoot hexutil.Bytes `json:"withdrawals_root"`
*v1.ExecutionPayloadHeaderCapella
}
func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderCapella
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderCapella.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeaderCapella.ParentHash,
FeeRecipient: h.ExecutionPayloadHeaderCapella.FeeRecipient,
StateRoot: h.ExecutionPayloadHeaderCapella.StateRoot,
ReceiptsRoot: h.ExecutionPayloadHeaderCapella.ReceiptsRoot,
LogsBloom: h.ExecutionPayloadHeaderCapella.LogsBloom,
PrevRandao: h.ExecutionPayloadHeaderCapella.PrevRandao,
BlockNumber: Uint64String(h.ExecutionPayloadHeaderCapella.BlockNumber),
GasLimit: Uint64String(h.ExecutionPayloadHeaderCapella.GasLimit),
GasUsed: Uint64String(h.ExecutionPayloadHeaderCapella.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeaderCapella.Timestamp),
ExtraData: h.ExecutionPayloadHeaderCapella.ExtraData,
BaseFeePerGas: baseFeePerGas,
BlockHash: h.ExecutionPayloadHeaderCapella.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeaderCapella.TransactionsRoot,
WithdrawalsRoot: h.ExecutionPayloadHeaderCapella.WithdrawalsRoot,
})
}
func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderCapella
uc := &UnmarshalCaller{}
if err := json.Unmarshal(b, uc); err != nil {
return err
}
ep := ExecutionPayloadHeaderCapella(*uc)
*h = ep
var err error
h.ExecutionPayloadHeaderCapella, err = h.ToProto()
return err
}
type ExecPayloadResponseCapella struct {
Version string `json:"version"`
Data ExecutionPayloadCapella `json:"data"`
}
type ExecutionPayloadCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
Withdrawals []Withdrawal `json:"withdrawals"`
}
func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
return r.Data.ToProto()
}
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = p.Transactions[i]
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: w.Address,
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadCapella{
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
type Withdrawal struct {
Index Uint256 `json:"index"`
ValidatorIndex Uint256 `json:"validator_index"`
Address hexutil.Bytes `json:"address"`
Amount Uint256 `json:"amount"`
}
type SignedBlindedBeaconBlockBellatrix struct {
*eth.SignedBlindedBeaconBlockBellatrix
}
type BlindedBeaconBlockBellatrix struct {
*eth.BlindedBeaconBlockBellatrix
}
type BlindedBeaconBlockBodyBellatrix struct {
*eth.BlindedBeaconBlockBodyBellatrix
}
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
})
}
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: b.ParentRoot,
StateRoot: b.StateRoot,
Body: &BlindedBeaconBlockBodyBellatrix{b.BlindedBeaconBlockBellatrix.Body},
})
}
type ProposerSlashing struct {
*eth.ProposerSlashing
}
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2"`
}{
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
})
}
type SignedBeaconBlockHeader struct {
*eth.SignedBeaconBlockHeader
}
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Header *BeaconBlockHeader `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
Signature: h.SignedBeaconBlockHeader.Signature,
})
}
type BeaconBlockHeader struct {
*eth.BeaconBlockHeader
}
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
BodyRoot hexutil.Bytes `json:"body_root"`
}{
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
ParentRoot: h.BeaconBlockHeader.ParentRoot,
StateRoot: h.BeaconBlockHeader.StateRoot,
BodyRoot: h.BeaconBlockHeader.BodyRoot,
})
}
type IndexedAttestation struct {
*eth.IndexedAttestation
}
func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
indices := make([]string, len(a.IndexedAttestation.AttestingIndices))
for i := range a.IndexedAttestation.AttestingIndices {
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
}
return json.Marshal(struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature"`
}{
AttestingIndices: indices,
Data: &AttestationData{a.IndexedAttestation.Data},
Signature: a.IndexedAttestation.Signature,
})
}
type AttesterSlashing struct {
*eth.AttesterSlashing
}
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Attestation1 *IndexedAttestation `json:"attestation_1"`
Attestation2 *IndexedAttestation `json:"attestation_2"`
}{
Attestation1: &IndexedAttestation{s.Attestation_1},
Attestation2: &IndexedAttestation{s.Attestation_2},
})
}
type Checkpoint struct {
*eth.Checkpoint
}
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
Root hexutil.Bytes `json:"root"`
}{
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
Root: c.Checkpoint.Root,
})
}
type AttestationData struct {
*eth.AttestationData
}
func (a *AttestationData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
Index string `json:"index"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root"`
Source *Checkpoint `json:"source"`
Target *Checkpoint `json:"target"`
}{
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
BeaconBlockRoot: a.AttestationData.BeaconBlockRoot,
Source: &Checkpoint{a.AttestationData.Source},
Target: &Checkpoint{a.AttestationData.Target},
})
}
type Attestation struct {
*eth.Attestation
}
func (a *Attestation) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature" ssz-size:"96"`
}{
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
Data: &AttestationData{a.Attestation.Data},
Signature: a.Attestation.Signature,
})
}
type DepositData struct {
*eth.Deposit_Data
}
func (d *DepositData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
PublicKey hexutil.Bytes `json:"pubkey"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature hexutil.Bytes `json:"signature"`
}{
PublicKey: d.PublicKey,
WithdrawalCredentials: d.WithdrawalCredentials,
Amount: fmt.Sprintf("%d", d.Amount),
Signature: d.Signature,
})
}
type Deposit struct {
*eth.Deposit
}
func (d *Deposit) MarshalJSON() ([]byte, error) {
proof := make([]hexutil.Bytes, len(d.Proof))
for i := range d.Proof {
proof[i] = d.Proof[i]
}
return json.Marshal(struct {
Proof []hexutil.Bytes `json:"proof"`
Data *DepositData `json:"data"`
}{
Proof: proof,
Data: &DepositData{Deposit_Data: d.Deposit.Data},
})
}
type SignedVoluntaryExit struct {
*eth.SignedVoluntaryExit
}
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *VoluntaryExit `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Signature: sve.SignedVoluntaryExit.Signature,
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
})
}
type VoluntaryExit struct {
*eth.VoluntaryExit
}
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
ValidatorIndex string `json:"validator_index"`
}{
Epoch: fmt.Sprintf("%d", ve.Epoch),
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
})
}
type SyncAggregate struct {
*eth.SyncAggregate
}
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature"`
}{
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
})
}
type Eth1Data struct {
*eth.Eth1Data
}
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DepositRoot hexutil.Bytes `json:"deposit_root"`
DepositCount string `json:"deposit_count"`
BlockHash hexutil.Bytes `json:"block_hash"`
}{
DepositRoot: e.DepositRoot,
DepositCount: fmt.Sprintf("%d", e.DepositCount),
BlockHash: e.BlockHash,
})
}
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits[i]}
}
deps := make([]*Deposit, len(b.BlindedBeaconBlockBodyBellatrix.Deposits))
for i := range b.BlindedBeaconBlockBodyBellatrix.Deposits {
deps[i] = &Deposit{Deposit: b.BlindedBeaconBlockBodyBellatrix.Deposits[i]}
}
atts := make([]*Attestation, len(b.BlindedBeaconBlockBodyBellatrix.Attestations))
for i := range b.BlindedBeaconBlockBodyBellatrix.Attestations {
atts[i] = &Attestation{Attestation: b.BlindedBeaconBlockBodyBellatrix.Attestations[i]}
}
atsl := make([]*AttesterSlashing, len(b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings))
for i := range b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings {
atsl[i] = &AttesterSlashing{AttesterSlashing: b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings[i]}
}
pros := make([]*ProposerSlashing, len(b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings))
for i := range b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings {
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
Graffiti: b.BlindedBeaconBlockBodyBellatrix.Graffiti,
ProposerSlashings: pros,
AttesterSlashings: atsl,
Attestations: atts,
Deposits: deps,
VoluntaryExits: sve,
SyncAggregate: &SyncAggregate{b.BlindedBeaconBlockBodyBellatrix.SyncAggregate},
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
})
}
type SignedBLSToExecutionChange struct {
*eth.SignedBLSToExecutionChange
}
func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BLSToExecutionChange `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Signature: ch.Signature,
Message: &BLSToExecutionChange{ch.Message},
})
}
type BLSToExecutionChange struct {
*eth.BLSToExecutionChange
}
func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
ValidatorIndex string `json:"validator_index"`
FromBlsPubkey hexutil.Bytes `json:"from_bls_pubkey"`
ToExecutionAddress hexutil.Bytes `json:"to_execution_address"`
}{
ValidatorIndex: fmt.Sprintf("%d", ch.ValidatorIndex),
FromBlsPubkey: ch.FromBlsPubkey,
ToExecutionAddress: ch.ToExecutionAddress,
})
}
type SignedBlindedBeaconBlockCapella struct {
*eth.SignedBlindedBeaconBlockCapella
}
type BlindedBeaconBlockCapella struct {
*eth.BlindedBeaconBlockCapella
}
type BlindedBeaconBlockBodyCapella struct {
*eth.BlindedBeaconBlockBodyCapella
}
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockCapella `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &BlindedBeaconBlockCapella{b.Block},
Signature: b.Signature,
})
}
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyCapella `json:"body"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: b.ParentRoot,
StateRoot: b.StateRoot,
Body: &BlindedBeaconBlockBodyCapella{b.Body},
})
}
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
for i := range b.VoluntaryExits {
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.VoluntaryExits[i]}
}
deps := make([]*Deposit, len(b.Deposits))
for i := range b.Deposits {
deps[i] = &Deposit{Deposit: b.Deposits[i]}
}
atts := make([]*Attestation, len(b.Attestations))
for i := range b.Attestations {
atts[i] = &Attestation{Attestation: b.Attestations[i]}
}
atsl := make([]*AttesterSlashing, len(b.AttesterSlashings))
for i := range b.AttesterSlashings {
atsl[i] = &AttesterSlashing{AttesterSlashing: b.AttesterSlashings[i]}
}
pros := make([]*ProposerSlashing, len(b.ProposerSlashings))
for i := range b.ProposerSlashings {
pros[i] = &ProposerSlashing{ProposerSlashing: b.ProposerSlashings[i]}
}
chs := make([]*SignedBLSToExecutionChange, len(b.BlsToExecutionChanges))
for i := range b.BlsToExecutionChanges {
chs[i] = &SignedBLSToExecutionChange{SignedBLSToExecutionChange: b.BlsToExecutionChanges[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderCapella `json:"execution_payload_header"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.Eth1Data},
Graffiti: b.Graffiti,
ProposerSlashings: pros,
AttesterSlashings: atsl,
Attestations: atts,
Deposits: deps,
VoluntaryExits: sve,
BLSToExecutionChanges: chs,
SyncAggregate: &SyncAggregate{b.SyncAggregate},
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: b.ExecutionPayloadHeader},
})
}
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`
Stacktraces []string `json:"stacktraces,omitempty"`
}
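Note: every wrapper above follows the same pattern — embed the protobuf-generated type and re-marshal it so integers become decimal strings and byte slices become 0x-prefixed hex, as the beacon API JSON expects. A minimal, self-contained sketch of that pattern (rawCheckpoint below is a stand-in for the generated eth.Checkpoint, not the real type):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// rawCheckpoint stands in for the protobuf-generated type that the real
// wrappers embed (e.g. *eth.Checkpoint).
type rawCheckpoint struct {
	Epoch uint64
	Root  []byte
}

// Checkpoint mirrors the wrapper pattern above: embed the raw type and
// re-marshal integers as decimal strings and byte slices as 0x-prefixed hex.
type Checkpoint struct {
	*rawCheckpoint
}

func (c *Checkpoint) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Epoch string        `json:"epoch"`
		Root  hexutil.Bytes `json:"root"`
	}{
		Epoch: fmt.Sprintf("%d", c.Epoch),
		Root:  c.Root,
	})
}

func main() {
	cp := &Checkpoint{&rawCheckpoint{Epoch: 42, Root: []byte{0xde, 0xad, 0xbe, 0xef}}}
	out, err := json.Marshal(cp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"epoch":"42","root":"0xdeadbeef"}
}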

File diff suppressed because it is too large.

View File

@@ -5,10 +5,8 @@ go_library(
srcs = [
"gateway.go",
"log.go",
"modifiers.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway",
importpath = "github.com/prysmaticlabs/prysm/api/gateway",
visibility = [
"//beacon-chain:__subpackages__",
"//validator:__subpackages__",
@@ -24,7 +22,6 @@ go_library(
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -10,12 +10,11 @@ go_library(
"process_request.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
importpath = "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api/grpc:go_default_library",
"//encoding/bytesutil:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -3,7 +3,6 @@ package apimiddleware
import (
"net/http"
"reflect"
"time"
"github.com/gorilla/mux"
)
@@ -15,8 +14,6 @@ import (
type ApiProxyMiddleware struct {
GatewayAddress string
EndpointCreator EndpointFactory
Timeout time.Duration
router *mux.Router
}
// EndpointFactory is responsible for creating new instances of Endpoint values.
@@ -32,8 +29,6 @@ type Endpoint struct {
GetResponse interface{} // The struct corresponding to the JSON structure used in a GET response.
PostRequest interface{} // The struct corresponding to the JSON structure used in a POST request.
PostResponse interface{} // The struct corresponding to the JSON structure used in a POST response.
DeleteRequest interface{} // The struct corresponding to the JSON structure used in a DELETE request.
DeleteResponse interface{} // The struct corresponding to the JSON structure used in a DELETE response.
RequestURLLiterals []string // Names of URL parameters that should not be base64-encoded.
RequestQueryParams []QueryParam // Query parameters of the request.
Err ErrorJson // The struct corresponding to the error that should be returned in case of a request failure.
@@ -79,23 +74,16 @@ type fieldProcessor struct {
// Run starts the proxy, registering all proxy endpoints.
func (m *ApiProxyMiddleware) Run(gatewayRouter *mux.Router) {
for _, path := range m.EndpointCreator.Paths() {
gatewayRouter.HandleFunc(path, m.WithMiddleware(path))
m.handleApiPath(gatewayRouter, path, m.EndpointCreator)
}
m.router = gatewayRouter
}
// ServeHTTP for the proxy middleware.
func (m *ApiProxyMiddleware) ServeHTTP(w http.ResponseWriter, req *http.Request) {
m.router.ServeHTTP(w, req)
}
// WithMiddleware wraps the given endpoint handler with the middleware logic.
func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
endpoint, err := m.EndpointCreator.Create(path)
func (m *ApiProxyMiddleware) handleApiPath(gatewayRouter *mux.Router, path string, endpointFactory EndpointFactory) {
gatewayRouter.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
endpoint, err := endpointFactory.Create(path)
if err != nil {
log.WithError(err).Errorf("Could not create endpoint for path: %s", path)
return
errJson := InternalServerErrorWithMessage(err, "could not create endpoint")
WriteError(w, errJson, nil)
}
for _, handler := range endpoint.CustomHandlers {
@@ -105,14 +93,16 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
}
if req.Method == "POST" {
if errJson := handlePostRequestForEndpoint(endpoint, w, req); errJson != nil {
if errJson := deserializeRequestBodyIntoContainerWrapped(endpoint, req, w); errJson != nil {
WriteError(w, errJson, nil)
return
}
}
if req.Method == "DELETE" && req.Body != http.NoBody {
if errJson := handleDeleteRequestForEndpoint(endpoint, req); errJson != nil {
if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
WriteError(w, errJson, nil)
return
}
if errJson := SetRequestBodyToRequestContainer(endpoint.PostRequest, req); errJson != nil {
WriteError(w, errJson, nil)
return
}
@@ -122,7 +112,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
WriteError(w, errJson, nil)
return
}
grpcResp, errJson := m.ProxyRequest(req)
grpcResp, errJson := ProxyRequest(req)
if errJson != nil {
WriteError(w, errJson, nil)
return
@@ -147,8 +137,6 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
var resp interface{}
if req.Method == "GET" {
resp = endpoint.GetResponse
} else if req.Method == "DELETE" {
resp = endpoint.DeleteResponse
} else {
resp = endpoint.PostResponse
}
@@ -176,27 +164,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
WriteError(w, errJson, nil)
return
}
}
}
func handlePostRequestForEndpoint(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) ErrorJson {
if errJson := deserializeRequestBodyIntoContainerWrapped(endpoint, req, w); errJson != nil {
return errJson
}
if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
return errJson
}
return SetRequestBodyToRequestContainer(endpoint.PostRequest, req)
}
func handleDeleteRequestForEndpoint(endpoint *Endpoint, req *http.Request) ErrorJson {
if errJson := DeserializeRequestBodyIntoContainer(req.Body, endpoint.DeleteRequest); errJson != nil {
return errJson
}
if errJson := ProcessRequestContainerFields(endpoint.DeleteRequest); errJson != nil {
return errJson
}
return SetRequestBodyToRequestContainer(endpoint.DeleteRequest, req)
})
}
func deserializeRequestBodyIntoContainerWrapped(endpoint *Endpoint, req *http.Request, w http.ResponseWriter) ErrorJson {

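For orientation, a rough sketch of how the proxy above gets wired up. The ApiProxyMiddleware fields and the EndpointFactory methods (Paths, Create, IsNil) are the ones visible in this hunk; the endpoint path and the response container are invented for illustration, and the import paths follow the non-/v4 side of the diff:

package main

import (
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)

// syncingResponseJson is a hypothetical GET response container; fields that
// need hex conversion would carry the `hex:"true"` tag handled by the field processors.
type syncingResponseJson struct {
	Syncing bool `json:"syncing"`
}

// endpointFactory is a minimal EndpointFactory implementation.
type endpointFactory struct{}

func (f *endpointFactory) IsNil() bool     { return f == nil }
func (f *endpointFactory) Paths() []string { return []string{"/eth/v1/node/syncing"} }
func (f *endpointFactory) Create(_ string) (*apimiddleware.Endpoint, error) {
	return &apimiddleware.Endpoint{
		GetResponse: &syncingResponseJson{},
		Err:         &apimiddleware.DefaultErrorJson{},
	}, nil
}

func main() {
	proxy := &apimiddleware.ApiProxyMiddleware{
		GatewayAddress:  "127.0.0.1:3500", // where grpc-gateway listens
		EndpointCreator: &endpointFactory{},
	}
	router := mux.NewRouter()
	proxy.Run(router) // registers a handler for every path the factory reports
	if err := http.ListenAndServe("127.0.0.1:4000", router); err != nil {
		panic(err)
	}
}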
View File

@@ -7,7 +7,7 @@ import (
"strings"
"github.com/gorilla/mux"
butil "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
butil "github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/wealdtech/go-bytesutil"
)

View File

@@ -6,8 +6,8 @@ import (
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestHandleURLParameters(t *testing.T) {

View File

@@ -3,13 +3,11 @@ package apimiddleware
import (
"encoding/base64"
"fmt"
"math/big"
"reflect"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/wealdtech/go-bytesutil"
@@ -32,26 +30,26 @@ func processField(s interface{}, processors []fieldProcessor) error {
sliceElem := t.Field(i).Type.Elem()
kind := sliceElem.Kind()
// Recursively process slices to struct pointers.
switch {
case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
for j := 0; j < v.Field(i).Len(); j++ {
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
// Process each string in string slices.
case kind == reflect.String:
if kind == reflect.String {
for _, proc := range processors {
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
if !hasTag {
continue
}
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
if hasTag {
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
}
}
// Recursively process struct pointers.
case reflect.Ptr:
@@ -75,10 +73,6 @@ func processField(s interface{}, processors []fieldProcessor) error {
}
func hexToBase64Processor(v reflect.Value) error {
if v.String() == "0x" {
v.SetString("")
return nil
}
b, err := bytesutil.FromHexString(v.String())
if err != nil {
return err
@@ -89,8 +83,6 @@ func hexToBase64Processor(v reflect.Value) error {
func base64ToHexProcessor(v reflect.Value) error {
if v.String() == "" {
// Empty hex values are represented as "0x".
v.SetString("0x")
return nil
}
b, err := base64.StdEncoding.DecodeString(v.String())
@@ -101,69 +93,6 @@ func base64ToHexProcessor(v reflect.Value) error {
return nil
}
func base64ToChecksumAddressProcessor(v reflect.Value) error {
if v.String() == "" {
// Empty hex values are represented as "0x".
v.SetString("0x")
return nil
}
b, err := base64.StdEncoding.DecodeString(v.String())
if err != nil {
return err
}
v.SetString(common.BytesToAddress(b).Hex())
return nil
}
func base64ToUint256Processor(v reflect.Value) error {
if v.String() == "" {
return nil
}
littleEndian, err := base64.StdEncoding.DecodeString(v.String())
if err != nil {
return err
}
if len(littleEndian) != 32 {
return errors.New("invalid length for Uint256")
}
// Integers are stored as little-endian, but
// big.Int expects big-endian. So we need to reverse
// the byte order before decoding.
var bigEndian [32]byte
for i := 0; i < len(littleEndian); i++ {
bigEndian[i] = littleEndian[len(littleEndian)-1-i]
}
var uint256 big.Int
uint256.SetBytes(bigEndian[:])
v.SetString(uint256.String())
return nil
}
func uint256ToBase64Processor(v reflect.Value) error {
if v.String() == "" {
return nil
}
uint256, ok := new(big.Int).SetString(v.String(), 10)
if !ok {
return fmt.Errorf("could not parse Uint256")
}
bigEndian := uint256.Bytes()
if len(bigEndian) > 32 {
return fmt.Errorf("number too big for Uint256")
}
// Integers are stored as little-endian, but
// big.Int gives big-endian. So we need to reverse
// the byte order before encoding.
var littleEndian [32]byte
for i := 0; i < len(bigEndian); i++ {
littleEndian[i] = bigEndian[len(bigEndian)-1-i]
}
v.SetString(base64.StdEncoding.EncodeToString(littleEndian[:]))
return nil
}
func enumToLowercaseProcessor(v reflect.Value) error {
v.SetString(strings.ToLower(v.String()))
return nil

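The Uint256 processors in this hunk hinge on the byte-order flip described in their comments: the wire format is a 32-byte little-endian slice, while big.Int works big-endian. A standalone round trip using the same 4196 value that appears in the test fixtures below:

package main

import (
	"encoding/base64"
	"fmt"
	"math/big"
)

func main() {
	// 4196 = 0x1064. The wire side stores the value as a 32-byte
	// little-endian slice, so the low byte 0x64 comes first.
	littleEndian := make([]byte, 32)
	littleEndian[0] = 0x64
	littleEndian[1] = 0x10
	// Matches the "ZBAA..." fixture used in the tests below.
	fmt.Println(base64.StdEncoding.EncodeToString(littleEndian))

	// big.Int expects big-endian, so reverse the byte order before decoding.
	bigEndian := make([]byte, 32)
	for i := range littleEndian {
		bigEndian[i] = littleEndian[len(littleEndian)-1-i]
	}
	fmt.Println(new(big.Int).SetBytes(bigEndian).String()) // 4196
}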
View File

@@ -4,13 +4,14 @@ import (
"bytes"
"encoding/json"
"io"
"net"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/api/grpc"
)
// DeserializeRequestBodyIntoContainer deserializes the request's body into an endpoint-specific struct.
@@ -37,10 +38,6 @@ func ProcessRequestContainerFields(requestContainer interface{}) ErrorJson {
tag: "hex",
f: hexToBase64Processor,
},
{
tag: "uint256",
f: uint256ToBase64Processor,
},
}); err != nil {
return InternalServerErrorWithMessage(err, "could not process request data")
}
@@ -55,7 +52,7 @@ func SetRequestBodyToRequestContainer(requestContainer interface{}, req *http.Re
return InternalServerErrorWithMessage(err, "could not marshal request")
}
// Set the body to the new JSON.
req.Body = io.NopCloser(bytes.NewReader(j))
req.Body = ioutil.NopCloser(bytes.NewReader(j))
req.Header.Set("Content-Length", strconv.Itoa(len(j)))
req.ContentLength = int64(len(j))
return nil
@@ -78,14 +75,11 @@ func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *h
}
// ProxyRequest proxies the request to grpc-gateway.
func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
// We do not use http.DefaultClient because it does not have any timeout.
netClient := &http.Client{Timeout: m.Timeout}
netClient := &http.Client{Timeout: time.Minute * 2}
grpcResp, err := netClient.Do(req)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
return nil, TimeoutError()
}
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
}
if grpcResp == nil {
@@ -96,7 +90,7 @@ func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, Er
// ReadGrpcResponseBody reads the body from the grpc-gateway's response.
func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
body, err := io.ReadAll(r)
body, err := ioutil.ReadAll(r)
if err != nil {
return nil, InternalServerErrorWithMessage(err, "could not read response body")
}
@@ -104,8 +98,6 @@ func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
}
// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
// Whether there was an error is indicated by the bool return value. In case of an error,
// there is no need to write to the response because it's taken care of by the function.
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []byte, w http.ResponseWriter) (bool, ErrorJson) {
responseHasError := false
if err := json.Unmarshal(respBody, errJson); err != nil {
@@ -119,14 +111,9 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
w.Header().Set(h, v)
}
}
// Handle gRPC timeout.
if resp.StatusCode == http.StatusGatewayTimeout {
WriteError(w, TimeoutError(), resp.Header)
} else {
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
}
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
}
return responseHasError, nil
}
@@ -151,10 +138,6 @@ func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
tag: "hex",
f: base64ToHexProcessor,
},
{
tag: "address",
f: base64ToChecksumAddressProcessor,
},
{
tag: "enum",
f: enumToLowercaseProcessor,
@@ -163,10 +146,6 @@ func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
tag: "time",
f: timeToUnixProcessor,
},
{
tag: "uint256",
f: base64ToUint256Processor,
},
}); err != nil {
return InternalServerErrorWithMessage(err, "could not process response data")
}
@@ -208,7 +187,7 @@ func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson
} else {
w.WriteHeader(grpcResp.StatusCode)
}
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(responseJson))); err != nil {
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(responseJson))); err != nil {
return InternalServerErrorWithMessage(err, "could not write response message")
}
} else {
@@ -262,7 +241,7 @@ func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.He
w.Header().Set("Content-Length", strconv.Itoa(len(j)))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errJson.StatusCode())
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(j))); err != nil {
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(j))); err != nil {
log.WithError(err).Error("Could not write error message")
}
}

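Two details of ProxyRequest in this hunk are easy to miss: it builds its own http.Client because http.DefaultClient has no timeout, and the v4 side inspects the error as a net.Error so timeouts can be reported separately. A self-contained sketch of that pattern (the target address is a placeholder chosen to hang):

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// http.DefaultClient has no timeout, so a hung upstream would block forever;
	// the proxy therefore builds its own client with an explicit deadline.
	client := &http.Client{Timeout: 2 * time.Second}

	req, err := http.NewRequest(http.MethodGet, "http://10.255.255.1/", nil) // placeholder address that hangs
	if err != nil {
		panic(err)
	}
	_, err = client.Do(req)
	if err == nil {
		fmt.Println("unexpectedly succeeded")
		return
	}
	if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
		fmt.Println("request timed out:", netErr)
		return
	}
	fmt.Println("other error:", err)
}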
View File

@@ -8,51 +8,37 @@ import (
"strings"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/api/grpc"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/sirupsen/logrus/hooks/test"
)
type testRequestContainer struct {
TestString string
TestHexString string `hex:"true"`
TestEmptyHexString string `hex:"true"`
TestUint256String string `uint256:"true"`
TestString string
TestHexString string `hex:"true"`
}
func defaultRequestContainer() *testRequestContainer {
return &testRequestContainer{
TestString: "test string",
TestHexString: "0x666F6F", // hex encoding of "foo"
TestEmptyHexString: "0x",
TestUint256String: "4196",
TestString: "test string",
TestHexString: "0x666F6F", // hex encoding of "foo"
}
}
type testResponseContainer struct {
TestString string
TestHex string `hex:"true"`
TestEmptyHex string `hex:"true"`
TestAddress string `address:"true"`
TestEmptyAddress string `address:"true"`
TestUint256 string `uint256:"true"`
TestEnum string `enum:"true"`
TestTime string `time:"true"`
TestString string
TestHex string `hex:"true"`
TestEnum string `enum:"true"`
TestTime string `time:"true"`
}
func defaultResponseContainer() *testResponseContainer {
return &testResponseContainer{
TestString: "test string",
TestHex: "Zm9v", // base64 encoding of "foo"
TestEmptyHex: "",
TestAddress: "Zm9v",
TestEmptyAddress: "",
TestEnum: "Test Enum",
TestTime: "2006-01-02T15:04:05Z",
// base64 encoding of 4196 in little-endian
TestUint256: "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
TestString: "test string",
TestHex: "Zm9v", // base64 encoding of "foo"
TestEnum: "Test Enum",
TestTime: "2006-01-02T15:04:05Z",
}
}
@@ -120,8 +106,6 @@ func TestProcessRequestContainerFields(t *testing.T) {
errJson := ProcessRequestContainerFields(container)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "Zm9v", container.TestHexString)
assert.Equal(t, "", container.TestEmptyHexString)
assert.Equal(t, "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", container.TestUint256String)
})
t.Run("error", func(t *testing.T) {
@@ -144,8 +128,8 @@ func TestSetRequestBodyToRequestContainer(t *testing.T) {
contentLengthHeader, ok := request.Header["Content-Length"]
require.Equal(t, true, ok)
require.Equal(t, 1, len(contentLengthHeader), "wrong number of header values")
assert.Equal(t, "108", contentLengthHeader[0])
assert.Equal(t, int64(108), request.ContentLength)
assert.Equal(t, "55", contentLengthHeader[0])
assert.Equal(t, int64(55), request.ContentLength)
}
func TestPrepareRequestForProxying(t *testing.T) {
@@ -250,10 +234,6 @@ func TestProcessMiddlewareResponseFields(t *testing.T) {
errJson := ProcessMiddlewareResponseFields(container)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "0x666f6f", container.TestHex)
assert.Equal(t, "0x", container.TestEmptyHex)
assert.Equal(t, "0x0000000000000000000000000000000000666F6f", container.TestAddress)
assert.Equal(t, "0x", container.TestEmptyAddress)
assert.Equal(t, "4196", container.TestUint256)
assert.Equal(t, "test enum", container.TestEnum)
assert.Equal(t, "1136214245", container.TestTime)
})
@@ -298,7 +278,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
v, ok = writer.Header()["Content-Length"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "224", v[0])
assert.Equal(t, "102", v[0])
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})

View File

@@ -41,13 +41,6 @@ func InternalServerError(err error) *DefaultErrorJson {
}
}
func TimeoutError() *DefaultErrorJson {
return &DefaultErrorJson{
Message: "Request timeout",
Code: http.StatusRequestTimeout,
}
}
// StatusCode returns the error's underlying error code.
func (e *DefaultErrorJson) StatusCode() int {
return e.Code

View File

@@ -13,8 +13,8 @@ import (
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/runtime"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/runtime"
"github.com/rs/cors"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
@@ -34,53 +34,74 @@ type PbMux struct {
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
// MuxHandler is a function that implements the mux handler functionality.
type MuxHandler func(
apiMiddlewareHandler *apimiddleware.ApiProxyMiddleware,
h http.HandlerFunc,
w http.ResponseWriter,
req *http.Request,
)
// Config parameters for setting up the gateway service.
type config struct {
maxCallRecvMsgSize uint64
remoteCert string
gatewayAddr string
remoteAddr string
allowedOrigins []string
apiMiddlewareEndpointFactory apimiddleware.EndpointFactory
muxHandler MuxHandler
pbHandlers []*PbMux
router *mux.Router
timeout time.Duration
}
type MuxHandler func(http.Handler, http.ResponseWriter, *http.Request)
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
type Gateway struct {
cfg *config
conn *grpc.ClientConn
server *http.Server
cancel context.CancelFunc
proxy *apimiddleware.ApiProxyMiddleware
ctx context.Context
startFailure error
conn *grpc.ClientConn
pbHandlers []*PbMux
muxHandler MuxHandler
maxCallRecvMsgSize uint64
router *mux.Router
server *http.Server
cancel context.CancelFunc
remoteCert string
gatewayAddr string
apiMiddlewareEndpointFactory apimiddleware.EndpointFactory
ctx context.Context
startFailure error
remoteAddr string
allowedOrigins []string
}
// New returns a new instance of the Gateway.
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
func New(
ctx context.Context,
pbHandlers []*PbMux,
muxHandler MuxHandler,
remoteAddr,
gatewayAddress string,
) *Gateway {
g := &Gateway{
ctx: ctx,
cfg: &config{},
pbHandlers: pbHandlers,
muxHandler: muxHandler,
router: mux.NewRouter(),
gatewayAddr: gatewayAddress,
ctx: ctx,
remoteAddr: remoteAddr,
allowedOrigins: []string{},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
g.cfg.router = mux.NewRouter()
}
return g, nil
return g
}
// WithRouter allows adding a custom mux router to the gateway.
func (g *Gateway) WithRouter(r *mux.Router) *Gateway {
g.router = r
return g
}
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
func (g *Gateway) WithAllowedOrigins(origins []string) *Gateway {
g.allowedOrigins = origins
return g
}
// WithRemoteCert allows adding a custom certificate to the gateway,
func (g *Gateway) WithRemoteCert(cert string) *Gateway {
g.remoteCert = cert
return g
}
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
func (g *Gateway) WithMaxCallRecvMsgSize(size uint64) *Gateway {
g.maxCallRecvMsgSize = size
return g
}
// WithApiMiddleware allows adding API Middleware proxy to the gateway.
func (g *Gateway) WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) *Gateway {
g.apiMiddlewareEndpointFactory = endpointFactory
return g
}
// Start the gateway service.
@@ -88,7 +109,7 @@ func (g *Gateway) Start() {
ctx, cancel := context.WithCancel(g.ctx)
g.cancel = cancel
conn, err := g.dial(ctx, "tcp", g.cfg.remoteAddr)
conn, err := g.dial(ctx, "tcp", g.remoteAddr)
if err != nil {
log.WithError(err).Error("Failed to connect to gRPC server")
g.startFailure = err
@@ -96,7 +117,7 @@ func (g *Gateway) Start() {
}
g.conn = conn
for _, h := range g.cfg.pbHandlers {
for _, h := range g.pbHandlers {
for _, r := range h.Registrations {
if err := r(ctx, h.Mux, g.conn); err != nil {
log.WithError(err).Error("Failed to register handler")
@@ -105,30 +126,29 @@ func (g *Gateway) Start() {
}
}
for _, p := range h.Patterns {
g.cfg.router.PathPrefix(p).Handler(h.Mux)
g.router.PathPrefix(p).Handler(h.Mux)
}
}
corsMux := g.corsMiddleware(g.cfg.router)
corsMux := g.corsMiddleware(g.router)
if g.cfg.apiMiddlewareEndpointFactory != nil && !g.cfg.apiMiddlewareEndpointFactory.IsNil() {
g.registerApiMiddleware()
}
if g.cfg.muxHandler != nil {
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.cfg.muxHandler(g.proxy, corsMux.ServeHTTP, w, r)
if g.muxHandler != nil {
g.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.muxHandler(corsMux, w, r)
})
}
if g.apiMiddlewareEndpointFactory != nil && !g.apiMiddlewareEndpointFactory.IsNil() {
go g.registerApiMiddleware()
}
g.server = &http.Server{
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
ReadHeaderTimeout: time.Second,
Addr: g.gatewayAddr,
Handler: g.router,
}
go func() {
log.WithField("address", g.cfg.gatewayAddr).Info("Starting gRPC gateway")
log.WithField("address", g.gatewayAddr).Info("Starting gRPC gateway")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start gRPC gateway")
g.startFailure = err
@@ -142,9 +162,11 @@ func (g *Gateway) Status() error {
if g.startFailure != nil {
return g.startFailure
}
if s := g.conn.GetState(); s != connectivity.Ready {
return fmt.Errorf("grpc server is %s", s)
}
return nil
}
@@ -161,16 +183,18 @@ func (g *Gateway) Stop() error {
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
c := cors.New(cors.Options{
AllowedOrigins: g.cfg.allowedOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
AllowedOrigins: g.allowedOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
@@ -212,8 +236,8 @@ func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientC
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
security := grpc.WithInsecure()
if len(g.cfg.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
if len(g.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.remoteCert, "")
if err != nil {
return nil, err
}
@@ -221,7 +245,7 @@ func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, e
}
opts := []grpc.DialOption{
security,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
@@ -242,17 +266,16 @@ func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn,
opts := []grpc.DialOption{
grpc.WithInsecure(),
grpc.WithContextDialer(f),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
func (g *Gateway) registerApiMiddleware() {
g.proxy = &apimiddleware.ApiProxyMiddleware{
GatewayAddress: g.cfg.gatewayAddr,
EndpointCreator: g.cfg.apiMiddlewareEndpointFactory,
Timeout: g.cfg.timeout,
proxy := &apimiddleware.ApiProxyMiddleware{
GatewayAddress: g.gatewayAddr,
EndpointCreator: g.apiMiddlewareEndpointFactory,
}
log.Info("Starting API middleware")
g.proxy.Run(g.cfg.router)
proxy.Run(g.router)
}

View File

@@ -10,10 +10,10 @@ import (
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
@@ -40,30 +40,26 @@ func TestGateway_Customized(t *testing.T) {
size := uint64(100)
endpointFactory := &mockEndpointFactory{}
opts := []Option{
WithRouter(r),
WithRemoteCert(cert),
WithAllowedOrigins(origins),
WithMaxCallRecvMsgSize(size),
WithApiMiddleware(endpointFactory),
WithMuxHandler(func(
_ *apimiddleware.ApiProxyMiddleware,
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g := New(
context.Background(),
[]*PbMux{},
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
g, err := New(context.Background(), opts...)
require.NoError(t, err)
},
"",
"",
).WithRouter(r).
WithRemoteCert(cert).
WithAllowedOrigins(origins).
WithMaxCallRecvMsgSize(size).
WithApiMiddleware(endpointFactory)
assert.Equal(t, r, g.cfg.router)
assert.Equal(t, cert, g.cfg.remoteCert)
require.Equal(t, 1, len(g.cfg.allowedOrigins))
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
assert.Equal(t, endpointFactory, g.cfg.apiMiddlewareEndpointFactory)
assert.Equal(t, r, g.router)
assert.Equal(t, cert, g.remoteCert)
require.Equal(t, 1, len(g.allowedOrigins))
assert.Equal(t, origins[0], g.allowedOrigins[0])
assert.Equal(t, size, g.maxCallRecvMsgSize)
assert.Equal(t, endpointFactory, g.apiMiddlewareEndpointFactory)
}
func TestGateway_StartStop(t *testing.T) {
@@ -79,27 +75,23 @@ func TestGateway_StartStop(t *testing.T) {
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
WithMuxHandler(func(
_ *apimiddleware.ApiProxyMiddleware,
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g := New(
ctx.Context,
[]*PbMux{},
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
g, err := New(context.Background(), opts...)
require.NoError(t, err)
},
selfAddress,
gatewayAddress,
)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting gRPC gateway")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
err := g.Stop()
require.NoError(t, err)
}
@@ -114,15 +106,15 @@ func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
g := New(
ctx.Context,
[]*PbMux{},
/* muxHandler */ nil,
selfAddress,
gatewayAddress,
)
writer := httptest.NewRecorder()
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
g.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}

View File

@@ -1,30 +0,0 @@
package gateway
import (
"context"
"net/http"
"strconv"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/protobuf/proto"
)
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
md, ok := gwruntime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
// set http status code
if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
code, err := strconv.Atoi(vals[0])
if err != nil {
return err
}
// delete the headers to not expose any grpc-metadata in http response
delete(md.HeaderMD, "x-http-code")
delete(w.Header(), "Grpc-Metadata-X-Http-Code")
w.WriteHeader(code)
}
return nil
}

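HttpResponseModifier above reads the x-http-code metadata header and deletes it before writing the HTTP status. The counterpart lives in the gRPC handlers, which set that header via standard grpc-go calls. A hedged sketch (the pingServer service is invented for illustration; grpc.SetHeader and metadata.Pairs are real grpc-go functions):

package rpc

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// pingServer is a made-up service used only to show the pattern; real Prysm
// handlers would be the generated gRPC service implementations.
type pingServer struct{}

// Ping asks grpc-gateway (via HttpResponseModifier above) to answer with
// HTTP 202 instead of the default 200.
func (s *pingServer) Ping(ctx context.Context) error {
	return grpc.SetHeader(ctx, metadata.Pairs("x-http-code", "202"))
}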
View File

@@ -1,88 +0,0 @@
package gateway
import (
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
)
type Option func(g *Gateway) error
func WithPbHandlers(handlers []*PbMux) Option {
return func(g *Gateway) error {
g.cfg.pbHandlers = handlers
return nil
}
}
func WithMuxHandler(m MuxHandler) Option {
return func(g *Gateway) error {
g.cfg.muxHandler = m
return nil
}
}
func WithGatewayAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.gatewayAddr = addr
return nil
}
}
func WithRemoteAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.remoteAddr = addr
return nil
}
}
// WithRouter allows adding a custom mux router to the gateway.
func WithRouter(r *mux.Router) Option {
return func(g *Gateway) error {
g.cfg.router = r
return nil
}
}
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
func WithAllowedOrigins(origins []string) Option {
return func(g *Gateway) error {
g.cfg.allowedOrigins = origins
return nil
}
}
// WithRemoteCert allows adding a custom certificate to the gateway,
func WithRemoteCert(cert string) Option {
return func(g *Gateway) error {
g.cfg.remoteCert = cert
return nil
}
}
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
func WithMaxCallRecvMsgSize(size uint64) Option {
return func(g *Gateway) error {
g.cfg.maxCallRecvMsgSize = size
return nil
}
}
// WithApiMiddleware allows adding an API middleware proxy to the gateway.
func WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) Option {
return func(g *Gateway) error {
g.cfg.apiMiddlewareEndpointFactory = endpointFactory
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
return func(g *Gateway) error {
g.cfg.timeout = time.Second * time.Duration(seconds)
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
return nil
}
}

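The file above defines the functional-option API that replaces the chained With* setters shown earlier in gateway.go. A usage sketch on the side of the diff that has the Option type (hence the /v4 import path); the addresses and sizes are placeholders:

package main

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/api/gateway"
)

func main() {
	opts := []gateway.Option{
		gateway.WithRemoteAddr("127.0.0.1:4000"),  // gRPC server to proxy to
		gateway.WithGatewayAddr("127.0.0.1:3500"), // HTTP listen address
		gateway.WithAllowedOrigins([]string{"*"}),
		gateway.WithMaxCallRecvMsgSize(1 << 22),
		gateway.WithTimeout(10), // seconds
	}
	g, err := gateway.New(context.Background(), opts...)
	if err != nil {
		panic(err)
	}
	g.Start()
	// ...run until shutdown is requested, then:
	if err := g.Stop(); err != nil {
		panic(err)
	}
}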
View File

@@ -6,7 +6,7 @@ go_library(
"grpcutils.go",
"parameters.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/grpc",
importpath = "github.com/prysmaticlabs/prysm/api/grpc",
visibility = ["//visibility:public"],
deps = [
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -7,8 +7,8 @@ import (
"testing"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"

View File

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["pagination.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/pagination",
importpath = "github.com/prysmaticlabs/prysm/api/pagination",
visibility = ["//visibility:public"],
deps = [
"//config/params:go_default_library",

View File

@@ -6,7 +6,7 @@ import (
"strconv"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/config/params"
)
// StartAndEndPage takes in the requested page token, wanted page size, total page size.

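StartAndEndPage's doc comment is cut off by the hunk, but the arithmetic it describes is plain page-token-times-page-size slicing with clamping. A self-contained illustration of that idea — this is not the package's actual signature, just the computation it performs:

package main

import (
	"fmt"
	"strconv"
)

// pageBounds converts a numeric page token and page size into [start, end)
// indices over a list of totalSize items, clamping the end to the list length.
func pageBounds(pageToken string, pageSize, totalSize int) (int, int, error) {
	page := 0
	if pageToken != "" {
		p, err := strconv.Atoi(pageToken)
		if err != nil {
			return 0, 0, err
		}
		page = p
	}
	start := page * pageSize
	if start >= totalSize {
		return 0, 0, fmt.Errorf("page token %q is out of range", pageToken)
	}
	end := start + pageSize
	if end > totalSize {
		end = totalSize
	}
	return start, end, nil
}

func main() {
	start, end, err := pageBounds("2", 10, 25)
	if err != nil {
		panic(err)
	}
	fmt.Println(start, end) // 20 25
}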
View File

@@ -3,9 +3,9 @@ package pagination_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/api/pagination"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/api/pagination"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestStartAndEndPage(t *testing.T) {

View File

@@ -8,7 +8,7 @@ go_library(
"multilock.go",
"scatter.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/async",
importpath = "github.com/prysmaticlabs/prysm/async",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)

View File

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["abool.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/async/abool",
importpath = "github.com/prysmaticlabs/prysm/async/abool",
visibility = ["//visibility:public"],
)

View File

@@ -6,8 +6,8 @@ import (
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/async"
"github.com/prysmaticlabs/prysm/testing/require"
log "github.com/sirupsen/logrus"
)

View File

@@ -3,21 +3,20 @@ package async_test
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/async"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
@@ -27,21 +26,21 @@ func TestDebounce_NoEvents(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
}
func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
@@ -63,23 +62,23 @@ func TestDebounce_CtxClosing(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
}
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
@@ -87,7 +86,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
// We should expect 100 rapid fire changes to only have caused
// 1 handler to trigger after the debouncing period.
time.Sleep(interval * 2)
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
cancel()
}
@@ -95,23 +94,23 @@ func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")
require.Equal(t, 0, timesHandled, "Events must prevent from handler execution")
// By this time the first event should be triggered.
time.Sleep(2 * time.Second)
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
// Second event.
eventsChan <- struct{}{}
time.Sleep(2 * time.Second)
assert.Equal(t, int32(2), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 2, timesHandled, "Wrong number of handled calls")
cancel()
}

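These tests pin down Debounce's contract: a burst of events on the channel collapses into a single handler call once the debounce interval has passed. A usage sketch based on the signature visible in the tests (import path as on the non-/v4 side of the diff):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/async"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	events := make(chan interface{}, 100)
	go async.Debounce(ctx, 500*time.Millisecond, events, func(event interface{}) {
		// Invoked once per burst after the debounce interval elapses.
		fmt.Println("handled:", event)
	})

	// Ten rapid-fire events collapse into a single handler invocation.
	for i := 0; i < 10; i++ {
		events <- i
	}
	time.Sleep(time.Second)
}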
View File

@@ -6,7 +6,7 @@ go_library(
"feed.go",
"subscription.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/async/event",
importpath = "github.com/prysmaticlabs/prysm/async/event",
visibility = ["//visibility:public"],
deps = ["//time/mclock:go_default_library"],
)

View File

@@ -19,7 +19,7 @@ package event_test
import (
"fmt"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/async/event"
)
func ExampleFeed_acknowledgedEvents() {

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"sync"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/async/event"
)
// This example demonstrates how SubscriptionScope can be used to control the lifetime of

View File

@@ -19,7 +19,7 @@ package event_test
import (
"fmt"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/async/event"
)
func ExampleNewSubscription() {

View File

@@ -23,7 +23,7 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/testing/assert"
)
func TestFeedPanics(t *testing.T) {

View File

@@ -21,7 +21,7 @@ import (
"sync"
"time"
"github.com/prysmaticlabs/prysm/v4/time/mclock"
"github.com/prysmaticlabs/prysm/time/mclock"
)
// waitQuotient is divided against the max backoff time, in order to have N requests based on the full

View File

@@ -23,7 +23,7 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/testing/require"
)
var errInts = errors.New("error in subscribeInts")

View File

@@ -2,25 +2,24 @@ package async_test
import (
"context"
"sync/atomic"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/async"
)
func TestEveryRuns(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
i := int32(0)
i := 0
async.RunEvery(ctx, 100*time.Millisecond, func() {
atomic.AddInt32(&i, 1)
i++
})
// Sleep for a bit and ensure the value has increased.
time.Sleep(200 * time.Millisecond)
if atomic.LoadInt32(&i) == 0 {
if i == 0 {
t.Error("Counter failed to increment with ticker")
}
@@ -29,12 +28,12 @@ func TestEveryRuns(t *testing.T) {
// Sleep for a bit to let the cancel take place.
time.Sleep(100 * time.Millisecond)
last := atomic.LoadInt32(&i)
last := i
// Sleep for a bit and ensure the value has not increased.
time.Sleep(200 * time.Millisecond)
if atomic.LoadInt32(&i) != last {
if i != last {
t.Error("Counter incremented after stop")
}
}

View File

@@ -3,9 +3,7 @@ Copyright 2017 Albert Tedja
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -51,7 +49,7 @@ func (lk *Lock) Lock() {
lk.unlock <- 1
}
// Unlock unlocks this lock. Must be called after Lock.
// Unlocks this lock. Must be called after Lock.
// Can only be invoked if there is a previous call to Lock.
func (lk *Lock) Unlock() {
<-lk.unlock
@@ -67,14 +65,14 @@ func (lk *Lock) Unlock() {
<-lk.lock
}
// Yield temporarily unlocks, gives up the cpu time to other goroutine, and attempts to lock again.
// Temporarily unlocks, gives up the cpu time to other goroutine, and attempts to lock again.
func (lk *Lock) Yield() {
lk.Unlock()
runtime.Gosched()
lk.Lock()
}
// NewMultilock creates a new multilock for the specified keys
// Creates a new multilock for the specified keys
func NewMultilock(locks ...string) *Lock {
if len(locks) == 0 {
return nil
@@ -89,7 +87,7 @@ func NewMultilock(locks ...string) *Lock {
}
}
// Clean cleans old unused locks. Returns removed keys.
// Cleans old unused locks. Returns removed keys.
func Clean() []string {
locks.lock <- 1
defer func() { <-locks.lock }()

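The multilock above serializes goroutines on arbitrary string keys. A usage sketch, assuming the file sits in the async package as the BUILD hunk earlier indicates (NewMultilock, Lock, Unlock, and Clean are the exported names visible in this hunk):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/async"
)

func main() {
	// Goroutines that share any of these keys serialize on them.
	lock := async.NewMultilock("dog", "cat")
	lock.Lock()
	// ...critical section touching the "dog" and "cat" resources...
	lock.Unlock()

	// Drop lock entries that are no longer in use; returns the removed keys.
	fmt.Println("cleaned:", async.Clean())
}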
View File

@@ -3,9 +3,7 @@ Copyright 2017 Albert Tedja
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -24,22 +22,22 @@ import (
func TestUnique(t *testing.T) {
var arr []string
a := assert.New(t)
assert := assert.New(t)
arr = []string{"a", "b", "c"}
a.Equal(arr, unique(arr))
assert.Equal(arr, unique(arr))
arr = []string{"a", "a", "a"}
a.Equal([]string{"a"}, unique(arr))
assert.Equal([]string{"a"}, unique(arr))
arr = []string{"a", "a", "b"}
a.Equal([]string{"a", "b"}, unique(arr))
assert.Equal([]string{"a", "b"}, unique(arr))
arr = []string{"a", "b", "a"}
a.Equal([]string{"a", "b"}, unique(arr))
assert.Equal([]string{"a", "b"}, unique(arr))
arr = []string{"a", "b", "c", "b", "d"}
a.Equal([]string{"a", "b", "c", "d"}, unique(arr))
assert.Equal([]string{"a", "b", "c", "d"}, unique(arr))
}
func TestGetChan(t *testing.T) {
@@ -47,9 +45,9 @@ func TestGetChan(t *testing.T) {
ch2 := getChan("aa")
ch3 := getChan("a")
a := assert.New(t)
a.NotEqual(ch1, ch2)
a.Equal(ch1, ch3)
assert := assert.New(t)
assert.NotEqual(ch1, ch2)
assert.Equal(ch1, ch3)
}
func TestLockUnlock(_ *testing.T) {
@@ -112,8 +110,9 @@ func TestLockUnlock_CleansUnused(t *testing.T) {
lock := NewMultilock("dog", "cat", "owl")
lock.Lock()
assert.Equal(t, 3, len(locks.list))
lock.Unlock()
defer lock.Unlock()
<-time.After(100 * time.Millisecond)
wg.Done()
}()
wg.Wait()

View File

@@ -14,7 +14,7 @@ type WorkerResults struct {
// Scatter scatters a computation across multiple goroutines.
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Results returned are collected and presented a a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {

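Scatter's doc comment explains the chunking contract: each worker receives its chunk's offset and length and returns a partial result, which the caller then reassembles. A usage sketch; the WorkerResults fields are not visible in this hunk, so the Extent field below is an assumption about where a worker's return value lands:

package main

import (
	"fmt"
	"sync"

	"github.com/prysmaticlabs/prysm/async"
)

func main() {
	data := make([]int, 1000)
	for i := range data {
		data[i] = i
	}

	// Each worker gets the offset and length of its chunk and returns a
	// partial result; Scatter runs the workers in parallel.
	results, err := async.Scatter(len(data), func(offset, entries int, _ *sync.RWMutex) (interface{}, error) {
		sum := 0
		for _, v := range data[offset : offset+entries] {
			sum += v
		}
		return sum, nil
	})
	if err != nil {
		panic(err)
	}

	total := 0
	for _, r := range results {
		total += r.Extent.(int) // assumption: Extent carries the worker's return value
	}
	fmt.Println(total) // 0 + 1 + ... + 999 = 499500
}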
View File

@@ -5,9 +5,9 @@ import (
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/async"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestDouble(t *testing.T) {

View File

@@ -4,18 +4,13 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"chain_info_forkchoice.go",
"error.go",
"execution_engine.go",
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"info.go",
"init_sync_process_block.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
"options.go",
"pow_block.go",
"process_attestation.go",
"process_attestation_helpers.go",
"process_block.go",
@@ -25,12 +20,12 @@ go_library(
"service.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain",
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//testing/fuzz:__pkg__",
"//testing/slasher/simulator:__pkg__",
"//testing/spectest:__subpackages__",
],
deps = [
"//async:go_default_library",
@@ -38,7 +33,6 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
@@ -48,44 +42,33 @@ go_library(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -106,21 +89,17 @@ go_test(
"blockchain_test.go",
"chain_info_test.go",
"checktags_test.go",
"execution_engine_test.go",
"forkchoice_update_execution_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"info_test.go",
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"pow_block_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -134,19 +113,14 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/fieldparams:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
@@ -167,10 +141,8 @@ go_test(
"chain_info_norace_test.go",
"checktags_test.go",
"init_test.go",
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
"setup_test.go",
],
embed = [":go_default_library"],
gc_goopts = [
@@ -192,16 +164,12 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",

@@ -1,7 +1,7 @@
package blockchain
import (
"io"
"io/ioutil"
"testing"
"github.com/sirupsen/logrus"
@@ -9,7 +9,7 @@ import (
func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
logrus.SetOutput(ioutil.Discard)
m.Run()
}
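
The only change in this test hook is the move from ioutil.Discard to io.Discard; since Go 1.16, ioutil.Discard is merely an alias for io.Discard, so behavior is unchanged. A self-contained sketch of the same TestMain, assuming only the logrus dependency already shown above:

package blockchain

import (
	"io"
	"testing"

	"github.com/sirupsen/logrus"
)

// TestMain silences package log output during tests. io.Discard (Go 1.16+)
// replaces the deprecated ioutil.Discard alias with identical behavior.
func TestMain(m *testing.M) {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(io.Discard)
	m.Run()
}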

@@ -1,79 +1,58 @@
package blockchain
import (
"bytes"
"context"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"go.opencensus.io/trace"
)
// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieve chain info related data.
// directly retrieves chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
FinalizationFetcher
GenesisFetcher
CanonicalFetcher
ForkFetcher
TimeFetcher
HeadDomainFetcher
ForkchoiceFetcher
}
// ForkchoiceFetcher defines a common interface for methods that directly access
// forkchoice information. These typically require a lock, so external callers
// are asked to use the methods in this blockchain package that take care of
// locking forkchoice.
type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
}
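
To illustrate the comment above, a hedged usage sketch (the function name is invented): a caller that depends only on the narrow ForkchoiceFetcher interface never touches forkchoice locks itself, because the blockchain package's wrapper methods acquire and release them.

package blockchain

import "context"

// logForkchoiceStats is a hypothetical consumer of the ForkchoiceFetcher
// interface above; every call below is internally locked by the Service's
// wrapper methods, so this code never handles forkchoice locks itself.
func logForkchoiceStats(ctx context.Context, f ForkchoiceFetcher) {
	head := f.CachedHeadRoot()
	count, err := f.ReceivedBlocksLastEpoch()
	if err != nil {
		return
	}
	_, _ = head, count // e.g. feed into logs or metrics
}
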
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
CurrentSlot() primitives.Slot
CurrentSlot() types.Slot
}
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
type GenesisFetcher interface {
GenesisValidatorsRoot() [32]byte
GenesisValidatorRoot() [32]byte
}
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieve head related data.
// directly retrieves head related data.
type HeadFetcher interface {
HeadSlot() primitives.Slot
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error)
HeadGenesisValidatorsRoot() [32]byte
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
HeadGenesisValidatorRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []primitives.Slot)
HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
ProtoArrayStore() *protoarray.Store
ChainHeads() ([][32]byte, []types.Slot)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
@@ -81,65 +60,51 @@ type HeadFetcher interface {
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
CurrentFork() *ethpb.Fork
GenesisFetcher
TimeFetcher
}
// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}
// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
VerifyBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieve finalization and justification related data.
// directly retrieves finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
UnrealizedJustifiedPayloadBlockHash() [32]byte
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
// OptimisticModeFetcher retrieves information about optimistic status of the node.
type OptimisticModeFetcher interface {
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}
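
A hedged usage sketch of OptimisticModeFetcher (the handler name and error text are invented): callers can refuse to serve data derived from an optimistically imported head, i.e. one whose execution payload has not yet been fully verified.

package blockchain

import (
	"context"
	"errors"
)

// refuseIfOptimistic is a hypothetical guard an API handler might run before
// answering queries that assume a fully verified head.
func refuseIfOptimistic(ctx context.Context, f OptimisticModeFetcher) error {
	optimistic, err := f.IsOptimistic(ctx)
	if err != nil {
		return err
	}
	if optimistic {
		return errors.New("head is optimistic: execution payload not yet verified")
	}
	return nil
}
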
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if s.finalizedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return ethpb.CopyCheckpoint(s.finalizedCheckpt)
}
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if s.justifiedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return ethpb.CopyCheckpoint(s.justifiedCheckpt)
}
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
if s.prevJustifiedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return ethpb.CopyCheckpoint(s.prevJustifiedCheckpt)
}
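
Both sides of this diff return copies of the checkpoints rather than the stored pointers (ethpb.CopyCheckpoint on one side, bytesutil.SafeCopyBytes on the other). A minimal sketch of that defensive-copy idea, with a hand-rolled helper standing in for those utilities:

package blockchain

import ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

// copyCheckpointSketch mirrors the getters above: hand callers a fresh
// Checkpoint, with a zero-hash fallback for nil, so they can never mutate the
// chain service's internal checkpoint state through the returned pointer.
func copyCheckpointSketch(cp *ethpb.Checkpoint) *ethpb.Checkpoint {
	if cp == nil {
		return &ethpb.Checkpoint{Root: make([]byte, 32)} // zero-hash fallback
	}
	root := make([]byte, len(cp.Root))
	copy(root, cp.Root)
	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: root}
}
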
// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() primitives.Slot {
func (s *Service) HeadSlot() types.Slot {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -155,8 +120,9 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.head != nil && s.head.root != params.BeaconConfig().ZeroHash {
return bytesutil.SafeCopyBytes(s.head.root[:]), nil
if s.headRoot() != params.BeaconConfig().ZeroHash {
r := s.headRoot()
return r[:], nil
}
b, err := s.cfg.BeaconDB.HeadBlock(ctx)
@@ -178,12 +144,12 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// HeadBlock returns the head block of the chain.
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.hasHeadState() {
return s.headBlock()
return s.headBlock(), nil
}
return s.cfg.BeaconDB.HeadBlock(ctx)
@@ -208,41 +174,31 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
// HeadStateReadOnly returns the read only head state of the chain.
// If the head is nil from service struct, it will attempt to get the
// head state from DB. Any callers of this method MUST only use the
// state instance to read fields from the state. Any type assertions back
// to the concrete type and subsequent use of it could lead to corruption
// of the state.
func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.HeadStateReadOnly")
defer span.End()
s.headLock.RLock()
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headStateReadOnly(ctx), nil
}
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return []primitives.ValidatorIndex{}, nil
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
}
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}, nil
}
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
}
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -250,7 +206,7 @@ func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
return s.headGenesisValidatorsRoot()
return s.headGenesisValidatorRoot()
}
// HeadETH1Data returns the eth1data of the current head state.
@@ -264,21 +220,26 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
return s.head.state.Eth1Data()
}
// ProtoArrayStore returns the proto array store object.
func (s *Service) ProtoArrayStore() *protoarray.Store {
return s.cfg.ForkChoiceStore.Store()
}
// GenesisTime returns the genesis time of beacon chain.
func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorsRoot returns the genesis validators
// GenesisValidatorRoot returns the genesis validator
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
func (s *Service) GenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}
}
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// CurrentFork retrieves the latest fork information of the beacon chain.
@@ -297,209 +258,57 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
// If the block has been finalized, the block will always be part of the canonical chain.
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
}
// If the block has been finalized, the block will always be part of the canonical chain.
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
nodes := s.ProtoArrayStore().Nodes()
// Deliberate choice not to preallocate space for the slices below.
// Heads can't be more than 2-3 in the worst case, whereas pre-allocation would start at 64.
headsRoots := make([][32]byte, 0)
headsSlots := make([]types.Slot, 0)
nonExistentNode := ^uint64(0)
for _, node := range nodes {
// Possible heads have no children.
if node.BestDescendant() == nonExistentNode && node.BestChild() == nonExistentNode {
headsRoots = append(headsRoots, node.Root())
headsSlots = append(headsSlots, node.Slot())
}
}
return headsRoots, headsSlots
}
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
func (s *Service) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return 0, false
}
return s.headValidatorIndexAtPubkey(pubKey)
return s.headState(ctx).ValidatorIndexByPubkey(pubKey)
}
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.ValidatorIndex) ([48]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [fieldparams.BLSPubkeyLength]byte{}, nil
return [48]byte{}, nil
}
v, err := s.headValidatorAtIndex(index)
if err != nil {
return [fieldparams.BLSPubkeyLength]byte{}, err
return [48]byte{}, err
}
return v.PublicKey(), nil
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
s.headLock.RLock()
headRoot := s.head.root
s.headLock.RUnlock()
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
return true, err
}
// If forkchoice does not have the head root, then the node is considered
// optimistic.
return true, nil
}
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
return true
}
// If node exists in our store, then it is not
// finalized.
if s.cfg.ForkChoiceStore.HasNode(root) {
return false
}
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}
// InForkchoice returns true if the given root is found in forkchoice
// This in particular means that the block root is a descendant of the
// finalized checkpoint.
func (s *Service) InForkchoice(root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
s.cfg.ForkChoiceStore.RUnlock()
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
return false, err
}
// if the requested root is the headroot and the root is not found in
// forkchoice, the node should respond that it is optimistic
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return true, err
}
if bytes.Equal(headRoot, root[:]) {
return true, nil
}
ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
if err != nil {
return false, err
}
if ss == nil {
ss, err = s.recoverStateSummary(ctx, root)
if err != nil {
return true, err
}
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot) > validatedCheckpoint.Epoch {
return true, nil
}
// Historical non-canonical blocks here are returned as optimistic for safety.
isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return !isCanonical, nil
}
// Checkpoint root could be zeros before the first finalized epoch. Use the genesis root in that case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
}
if lastValidated == nil {
lastValidated, err = s.recoverStateSummary(ctx, root)
if err != nil {
return false, err
}
}
if ss.Slot > lastValidated.Slot {
return true, nil
}
return !isCanonical, nil
}
// Ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
//
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()
r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is the optimal path.
s.cfg.ForkChoiceStore.RLock()
ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
s.cfg.ForkChoiceStore.RUnlock()
if err != nil {
// Try getting ancestor root from DB when failed to retrieve from fork choice store.
// This is the second line of defense for retrieving ancestor root.
ar, err = s.ancestorByDB(ctx, r, slot)
if err != nil {
return nil, err
}
}
return ar[:], nil
}
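
For reference, a direct Go transcription of the get_ancestor pseudocode quoted above, using a hypothetical in-memory map from block root to (slot, parent root) in place of the real store; the service itself asks the fork choice store first and only falls back to the DB, as shown above.

package blockchain

// ancestorSketch carries the minimum block data get_ancestor needs.
type ancestorSketch struct {
	slot       uint64 // plain uint64 stands in for the slot type
	parentRoot [32]byte
}

// getAncestor walks parent links until it reaches the requested slot; for a
// block older than the queried slot (a skip slot) it returns the block itself,
// matching the spec pseudocode.
func getAncestor(blocks map[[32]byte]ancestorSketch, root [32]byte, slot uint64) [32]byte {
	b, ok := blocks[root]
	if !ok {
		return root // unknown root: nothing further to walk in this sketch
	}
	switch {
	case b.slot > slot:
		return getAncestor(blocks, b.parentRoot, slot)
	case b.slot == slot:
		return root
	default:
		// Root is older than the queried slot, thus a skip slot: return it as-is.
		return root
	}
}
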
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, err
}
summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
return nil, err
}
return summary, nil
}
return nil, errBlockDoesNotExist
}

@@ -1,94 +0,0 @@
package blockchain
import (
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)
// CachedHeadRoot returns the corresponding value from Forkchoice
func (s *Service) CachedHeadRoot() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.CachedHeadRoot()
}
// GetProposerHead returns the corresponding value from forkchoice
func (s *Service) GetProposerHead() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}
// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
}
// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
}
// NewSlot returns the corresponding value from forkchoice
func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
}
// ProposerBoost wraps the corresponding method from forkchoice
func (s *Service) ProposerBoost() [32]byte {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.ProposerBoost()
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}
// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
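
Every wrapper in the file above follows the same shape: take the forkchoice store's lock (RLock for reads, Lock for writes), delegate, and release via defer. A reduced sketch of that pattern with invented stand-in types, not the real Service or store:

package blockchain

import "sync"

// fcStoreSketch stands in for the forkchoice store; embedding sync.RWMutex
// mirrors how s.cfg.ForkChoiceStore exposes Lock/RLock above.
type fcStoreSketch struct {
	sync.RWMutex
	cachedHeadRoot [32]byte
	genesisTime    uint64
}

type serviceSketch struct {
	store *fcStoreSketch
}

// CachedHeadRoot is a read path: read-lock, delegate, release via defer.
func (s *serviceSketch) CachedHeadRoot() [32]byte {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.store.cachedHeadRoot
}

// SetForkChoiceGenesisTime is a write path, so it takes the exclusive lock.
func (s *serviceSketch) SetForkChoiceGenesisTime(ts uint64) {
	s.store.Lock()
	defer s.store.Unlock()
	s.store.genesisTime = ts
}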

@@ -4,14 +4,11 @@ import (
"context"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestHeadSlot_DataRace(t *testing.T) {
@@ -19,78 +16,46 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
<-wait
}
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadRoot(context.Background())
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
head: &head{block: wsb},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadBlock(context.Background())
_, err := s.HeadBlock(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadState(context.Background())
_, err := s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}
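
The tests above exercise concurrent readers and writers of the head so that the race detector (go test -race) can catch missing synchronization. A reduced, self-contained sketch of the same pattern with invented types rather than the real Service:

package blockchain

import (
	"sync"
	"testing"
)

// raceExampleSketch is a hypothetical stand-in for the head bookkeeping: the
// writer goroutine and the test goroutine share it behind a RWMutex.
type raceExampleSketch struct {
	mu   sync.RWMutex
	slot uint64
}

func (r *raceExampleSketch) setSlot(s uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.slot = s
}

func (r *raceExampleSketch) getSlot() uint64 {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.slot
}

func TestRaceExampleSketch(t *testing.T) {
	r := &raceExampleSketch{}
	wait := make(chan struct{})
	go func() {
		defer close(wait)
		r.setSlot(100) // concurrent writer, like saveHead in the tests above
	}()
	_ = r.getSlot() // concurrent reader, like HeadSlot/HeadRoot above
	<-wait
}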

@@ -5,21 +5,18 @@ import (
"testing"
"time"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/protobuf/proto"
)
@@ -28,38 +25,10 @@ var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)
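
The three blank-identifier assignments above are compile-time interface assertions. A minimal stand-alone illustration of the idiom (the types here are invented for the example only):

package blockchain

type greeterSketch interface {
	Greet() string
}

type englishGreeterSketch struct{}

func (englishGreeterSketch) Greet() string { return "hello" }

// If englishGreeterSketch ever stops satisfying greeterSketch, this line fails
// to compile, which is exactly how the Service assertions above are used.
var _ greeterSketch = (*englishGreeterSketch)(nil)
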
// prepareForkchoiceState prepares a beacon state with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(
_ context.Context,
slot primitives.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}
base := &ethpb.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, 1),
CurrentJustifiedCheckpoint: justified,
FinalizedCheckpoint: finalized,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
func TestFinalizedCheckpt_Nil(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], c.FinalizedCheckpt().Root, "Incorrect pre chain start value")
}
func TestHeadRoot_Nil(t *testing.T) {
@@ -70,106 +39,96 @@ func TestHeadRoot_Nil(t *testing.T) {
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.finalizedCheckpt = cp
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
cp := service.FinalizedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
cp = service.CurrentJustifiedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
// check that forkchoice has the right genesis root as the node root
root, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, service.originBlockRoot, root)
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.finalizedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.FinalizedCheckpt().Root)
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
beaconDB := testDB.SetupDB(t)
jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, jroot))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, cp))
jp := service.CurrentJustifiedCheckpt()
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.justifiedCheckpt = cp
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}
func TestFinalizedBlockHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
beaconDB := testDB.SetupDB(t)
r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, r))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
h := service.FinalizedBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.justifiedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.CurrentJustifiedCheckpt().Root)
}
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := context.Background()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
h := service.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
cp := &ethpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
c.prevJustifiedCheckpt = cp
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
}
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'C'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.prevJustifiedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.PreviousJustifiedCheckpt().Root)
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetSlot(100)
c.head = &head{block: b, state: s}
assert.Equal(t, primitives.Slot(100), c.HeadSlot())
c.head = &head{slot: 100, state: s}
assert.Equal(t, types.Slot(100), c.HeadSlot())
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
r, err := service.HeadRoot(ctx)
c := &Service{}
c.head = &head{root: [32]byte{'A'}}
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, service.originBlockRoot, bytesutil.ToBytes32(r))
assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
}
func TestHeadRoot_UseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
service.head = &head{root: params.BeaconConfig().ZeroHash}
beaconDB := testDB.SetupDB(t)
c := &Service{cfg: &config{BeaconDB: beaconDB}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, br))
r, err := service.HeadRoot(ctx)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, br, bytesutil.ToBytes32(r))
}
@@ -177,28 +136,24 @@ func TestHeadRoot_UseDB(t *testing.T) {
func TestHeadBlock_CanRetrieve(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 1
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
c := &Service{}
c.head = &head{block: wsb, state: s}
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
received, err := c.HeadBlock(context.Background())
recevied, err := c.HeadBlock(context.Background())
require.NoError(t, err)
pb, err := received.Proto()
require.NoError(t, err)
assert.DeepEqual(t, b, pb, "Incorrect head block received")
assert.DeepEqual(t, b, recevied.Proto(), "Incorrect head block received")
}
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Incorrect head state received")
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
@@ -209,7 +164,7 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &ethpb.Fork{Epoch: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Fork: f})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -229,15 +184,15 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
}
}
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
require.NoError(t, err)
c.head = &head{state: s}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
}
func TestHeadETH1Data_Nil(t *testing.T) {
@@ -248,7 +203,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Eth1Data: d})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -266,7 +221,7 @@ func TestIsCanonical_Ok(t *testing.T) {
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, blk)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
@@ -292,57 +247,54 @@ func TestService_HeadValidatorsIndices(t *testing.T) {
require.Equal(t, 10, len(indices))
}
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
func TestService_HeadSeed(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
c := &Service{}
seed, err := helpers.Seed(s, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
c.head = &head{}
root, err := c.HeadSeed(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [32]byte{}, root)
c.head = &head{state: s}
root, err = c.HeadSeed(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, seed, root)
}
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
c := &Service{}
c.head = &head{}
root := c.HeadGenesisValidatorsRoot()
root := c.HeadGenesisValidatorRoot()
require.Equal(t, [32]byte{}, root)
c.head = &head{state: s}
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
root = c.HeadGenesisValidatorRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
}
//
//  A <- B <- C
//   \    \
//    \    ---------- E
//     ---------- D
func TestService_ProtoArrayStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
p := c.ProtoArrayStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
func TestService_ChainHeads(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{}, 0, 0))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
rootMap := map[[32]byte]primitives.Slot{{'c'}: 102, {'d'}: 103, {'e'}: 104}
for i, root := range roots {
slot, ok := rootMap[root]
require.Equal(t, true, ok)
require.Equal(t, slot, slots[i])
}
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
@@ -350,29 +302,29 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
_, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
i, e := c.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(v.PublicKey))
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), bytesutil.ToBytes48(v.PublicKey))
require.Equal(t, true, e)
require.Equal(t, primitives.ValidatorIndex(0), i)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
c := &Service{}
c.head = nil
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
idx, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
require.Equal(t, primitives.ValidatorIndex(0), idx)
require.Equal(t, types.ValidatorIndex(0), idx)
c.head = &head{state: nil}
i, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
require.Equal(t, primitives.ValidatorIndex(0), i)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
@@ -395,190 +347,10 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
require.Equal(t, [48]byte{}, p)
c.head = &head{state: nil}
p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
func TestService_IsOptimistic(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, true, validated)
}
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
_, err = c.IsOptimisticForRoot(ctx, br)
assert.NoError(t, err)
summ, err := beaconDB.StateSummary(ctx, br)
assert.NoError(t, err)
assert.NotNil(t, summ)
assert.Equal(t, 10, int(summ.Slot))
assert.DeepEqual(t, br[:], summ.Root)
}
func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
r1 := [32]byte{'a'}
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,
}))
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:], Slot: 10}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Root: br[:],
}))
require.Equal(t, true, c.IsFinalized(ctx, r1))
require.Equal(t, true, c.IsFinalized(ctx, br))
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}
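The tests above exercise the rule that a block root counts as optimistic until the execution layer has validated it. As a rough, standalone illustration (assumed names and types, not the Prysm API, and ignoring the fork-choice and DB-recovery paths), a root can be modeled as optimistic when it is non-canonical or when its state-summary slot is above the last validated checkpoint slot:

package main

import "fmt"

// summary is a minimal stand-in for a state summary: the slot of the root and
// whether the root is on the canonical chain.
type summary struct {
	slot      uint64
	canonical bool
}

// store is a hypothetical in-memory stand-in for the beacon DB pieces used here.
type store struct {
	summaries         map[[32]byte]summary
	lastValidatedSlot uint64
}

// isOptimistic applies the simplified rule: non-canonical roots are optimistic,
// canonical roots are optimistic only above the last validated slot.
func (s *store) isOptimistic(root [32]byte) (bool, error) {
	sm, ok := s.summaries[root]
	if !ok {
		return false, fmt.Errorf("no state summary for root %#x", root)
	}
	if !sm.canonical {
		return true, nil
	}
	return sm.slot > s.lastValidatedSlot, nil
}

func main() {
	s := &store{
		summaries: map[[32]byte]summary{
			{'a'}: {slot: 10, canonical: true}, // validated
			{'b'}: {slot: 11, canonical: true}, // ahead of the validated checkpoint
			{'c'}: {slot: 9, canonical: false}, // non-canonical
		},
		lastValidatedSlot: 10,
	}
	for _, r := range [][32]byte{{'a'}, {'b'}, {'c'}} {
		opt, err := s.isOptimistic(r)
		fmt.Printf("%c optimistic=%v err=%v\n", r[0], opt, err)
	}
}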

View File

@@ -1,4 +1,4 @@
//go:build !develop
// +build !develop
package blockchain

View File

@@ -1,95 +0,0 @@
package blockchain
import "github.com/pkg/errors"
var (
// ErrInvalidPayload is returned when the payload is invalid
ErrInvalidPayload = invalidBlock{error: errors.New("received an INVALID payload from execution engine")}
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
ErrUndefinedExecutionEngineError = errors.New("received an undefined execution engine error")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint.
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
)
// An invalid block is a block that fails the state transition based on the core protocol rules.
// The beacon node shall neither accept nor build blocks that branch off from an invalid block.
// Some examples of invalid blocks are:
// The block violates state transition rules.
// The block is deemed invalid according to the execution layer client.
// The block violates certain fork choice rules (before finalized slot, not finalized ancestor).
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
}
type invalidBlockError interface {
Error() string
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
}
// BlockRoot returns the invalid block root.
func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}
// InvalidAncestorRoots returns an optional list of invalid ancestor roots of the invalid block, leading up to the last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
}
// IsInvalidBlock returns true if the error is or wraps an `invalidBlock`.
func IsInvalidBlock(e error) bool {
if e == nil {
return false
}
_, ok := e.(invalidBlockError)
if !ok {
return IsInvalidBlock(errors.Unwrap(e))
}
return true
}
// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid block root, [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.BlockRoot()
}
// InvalidAncestorRoots returns a list of invalid roots up to the last valid root.
func InvalidAncestorRoots(e error) [][32]byte {
if e == nil {
return [][32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [][32]byte{}
}
return d.InvalidAncestorRoots()
}
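As a quick illustration of the detection pattern above — a concrete error type satisfying a small interface, found anywhere in a wrap chain — the following standalone sketch uses only the standard library's errors.As, which is equivalent to the recursive errors.Unwrap walk in IsInvalidBlock (illustrative names, not the Prysm API):

package main

import (
	"errors"
	"fmt"
)

// invalidBlock mirrors the shape above: it embeds an error and carries a root.
type invalidBlock struct {
	error
	root [32]byte
}

func (e invalidBlock) BlockRoot() [32]byte { return e.root }

// invalidBlockError is the behavior-based interface used for detection.
type invalidBlockError interface {
	error
	BlockRoot() [32]byte
}

// isInvalidBlock reports whether err, or anything it wraps, is an invalid-block error.
func isInvalidBlock(err error) bool {
	var target invalidBlockError
	return errors.As(err, &target)
}

func main() {
	base := invalidBlock{error: errors.New("received an INVALID payload"), root: [32]byte{'a'}}
	wrapped := fmt.Errorf("processing block: %w", base) // extra context, still detectable
	fmt.Println(isInvalidBlock(wrapped))                // true
	fmt.Println(isInvalidBlock(errors.New("timeout")))  // false
}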

View File

@@ -1,36 +0,0 @@
package blockchain
import (
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestIsInvalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(ErrInvalidPayload)) // Already wrapped.
err := invalidBlock{error: ErrInvalidPayload}
require.Equal(t, true, IsInvalidBlock(err))
newErr := errors.Wrap(err, "wrap me")
require.Equal(t, true, IsInvalidBlock(newErr))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}
func TestInvalidBlockRoot(t *testing.T) {
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrUndefinedExecutionEngineError))
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrInvalidPayload))
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}
func TestInvalidRoots(t *testing.T) {
roots := [][32]byte{{'d'}, {'b'}, {'c'}}
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}, invalidAncestorRoots: roots}
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, roots, InvalidAncestorRoots(err))
}

View File

@@ -1,357 +0,0 @@
package blockchain
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.ReadOnlyBeaconBlock
}
// notifyForkchoiceUpdate signals the fork choice updates to the execution engine. The execution engine should:
// 1. Re-organize the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Apply finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkchoiceUpdateArg) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
headBlk := arg.headBlock
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
log.Error("Head block is nil")
return nil, nil
}
// Must not call fork choice updated until the transition conditions are met on the PoW network.
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
log.WithError(err).Error("Could not determine if head block is execution block")
return nil, nil
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
switch err {
case execution.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
case execution.ErrInvalidPayloadStatus:
forkchoiceUpdatedInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
}
r, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks, could not update head root")
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
}
b, err := s.getBlock(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head block")
return nil, nil
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head state")
return nil, nil
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,
headRoot: r,
headBlock: b.Block(),
})
if err != nil {
return nil, err // Returning err because it's recursive here.
}
if err := s.saveHead(ctx, r, b, st); err != nil {
log.WithError(err).Error("could not save head after pruning invalid blocks")
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
default:
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
// If the forkchoice update call has an attribute, update the proposer payload ID cache.
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
} else if hasAttr && payloadID == nil {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
}).Error("Received nil payload ID on VALID engine response")
}
return payloadID, nil
}
// getPayloadHash returns the payload hash given the block root.
// If the block is before the Bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
if err != nil {
return [32]byte{}, err
}
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := blk.Block().Body().Execution()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash()), nil
}
// notifyNewPayload signals the execution engine of a new payload.
// It returns true if the EL has returned VALID for the block.
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payloads are only supported in Bellatrix and beyond. Pre-merge
// blocks are never optimistic.
if blocks.IsPreBellatrixVersion(postStateVersion) {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
if !enabled {
return true, nil
}
payload, err := body.Execution()
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
switch err {
case nil:
newPayloadValidNodeCount.Inc()
return true, nil
case execution.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
case execution.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
}
case execution.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// getPayloadAttribute returns the payload attribute for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// Root is `[32]byte{}` since we are retrieving the proposer ID of a given slot. At insertion time the root was not yet known.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
if !ok && !features.Get().PrepareAllPayloads { // There's no need to build attribute if there is no proposer for slot.
return false, emptyAttri, 0
}
// Get previous randao.
st = st.Copy()
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return false, emptyAttri, 0
}
// Get fee recipient.
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": params.BeaconConfig().EthBurnAddressHex,
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
"Please refer to our documentation for instructions")
}
case err != nil:
log.WithError(err).Error("Could not get fee recipient to get payload attribute")
return false, emptyAttri, 0
default:
feeRecipient = recipient
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return false, emptyAttri, 0
}
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri, 0
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return false, emptyAttri, 0
}
return true, attr, proposerID
}
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
for _, root := range blkRoots {
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
return err
}
// Deleting the block also deletes its corresponding state.
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
// TODO(10487): If a caller requests to delete a root that's justified and finalized, we should gracefully shut down.
// This is an irreparable condition; it would mean a justified or finalized block has become invalid.
return err
}
}
return nil
}
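To summarize the control flow of notifyForkchoiceUpdate above, the sketch below models the three coarse engine responses (VALID, SYNCING/ACCEPTED, INVALID) and how each is dispatched. It is an illustration under assumed names and types, not the Prysm or Engine API surface; in particular, the real code also prunes the invalid branch and recursively retries with the new fork-choice head.

package main

import (
	"errors"
	"fmt"
)

// forkchoiceState mirrors the three hashes sent to engine_forkchoiceUpdated.
type forkchoiceState struct {
	HeadBlockHash      [32]byte
	SafeBlockHash      [32]byte
	FinalizedBlockHash [32]byte
}

// Sentinel errors standing in for the engine client's payload status mapping.
var (
	errSyncing = errors.New("payload status SYNCING/ACCEPTED")
	errInvalid = errors.New("payload status INVALID")
)

// engine is a stand-in for the execution engine caller.
type engine interface {
	ForkchoiceUpdated(fcs forkchoiceState) ([8]byte, error)
}

// notify dispatches on the engine response, mirroring the switch above.
func notify(e engine, fcs forkchoiceState) ([8]byte, error) {
	payloadID, err := e.ForkchoiceUpdated(fcs)
	switch {
	case err == nil:
		// VALID: mark the head fully validated and keep the payload ID for the proposer.
		return payloadID, nil
	case errors.Is(err, errSyncing):
		// SYNCING / ACCEPTED: proceed optimistically with the same head.
		return payloadID, nil
	case errors.Is(err, errInvalid):
		// INVALID: the head (and its invalid ancestors) must be pruned; surface the error.
		return [8]byte{}, fmt.Errorf("head %#x invalid: %w", fcs.HeadBlockHash, err)
	default:
		return [8]byte{}, fmt.Errorf("undefined execution engine error: %w", err)
	}
}

type mockEngine struct{ err error }

func (m mockEngine) ForkchoiceUpdated(forkchoiceState) ([8]byte, error) { return [8]byte{1}, m.err }

func main() {
	for _, e := range []error{nil, errSyncing, errInvalid} {
		id, err := notify(mockEngine{err: e}, forkchoiceState{HeadBlockHash: [32]byte{'h'}})
		fmt.Println(id, err)
	}
}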

File diff suppressed because it is too large.

View File

@@ -1,134 +0,0 @@
package blockchain
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewProposer(slot primitives.Slot) bool {
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
return ok || features.Get().PrepareAllPayloads
}
func (s *Service) isNewHead(r [32]byte) bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
currentHeadRoot := s.originBlockRoot
if s.head != nil {
currentHeadRoot = s.headRoot()
}
return r != currentHeadRoot || r == [32]byte{}
}
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
if !s.hasBlockInInitSyncOrDB(ctx, r) {
return nil, nil, errors.New("block does not exist")
}
newHeadBlock, err := s.getBlock(ctx, r)
if err != nil {
return nil, nil, err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, nil, err
}
return headState, newHeadBlock, nil
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
// It returns true if the new head is updated.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead {
return false, nil
}
isNewProposer := s.isNewProposer(proposingSlot)
if isNewProposer && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return false, nil
}
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return false, nil
}
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock.Block(),
})
if err != nil {
return false, errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return true, nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being
// reorged by the next proposer.
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
if proposerHead != newHeadRoot {
return true
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
params.BeaconConfig().SecondsPerSlot)
lateBlockFailedAttemptSecondThreshold.Inc()
} else {
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
return true
}
secs, err := slots.SecondsSinceSlotStart(currentSlot,
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
}
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
doublylinkedtree.ProcessAttestationsThreshold)
lateBlockFailedAttemptFirstThreshold.Inc()
}
}
return false
}
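The timing rule in shouldOverrideFCU above can be summarized as: when proposing the current slot, withhold the update only if fork choice still prefers the parent as the proposer head; when proposing the next slot, abandon the reorg attempt once attestations for the late block have started to be counted. The sketch below is a simplified model with assumed thresholds; the real decision also consults fork-choice weights and feature flags.

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot       = 12
	attestationThreshold = 10 // assumed: seconds into the slot after which a reorg attempt is abandoned
)

// secondsIntoSlot computes how far into `slot` we are, given the genesis time.
func secondsIntoSlot(genesis time.Time, slot uint64, now time.Time) uint64 {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	if now.Before(slotStart) {
		return 0
	}
	return uint64(now.Sub(slotStart).Seconds())
}

// shouldOverride reports whether the forkchoice update for the new head should
// be withheld so the next proposer can attempt to reorg a late block.
func shouldOverride(proposingSlot, currentSlot uint64, newHeadIsProposerHead bool, secsIntoSlot uint64) bool {
	if proposingSlot == currentSlot {
		// Proposing this very slot: only withhold the update if fork choice
		// still prefers the parent as the proposer head.
		return !newHeadIsProposerHead
	}
	// Proposing the next slot: once attestations for the late block are being
	// counted, abandon the reorg attempt and send the update as usual.
	return secsIntoSlot < attestationThreshold
}

func main() {
	genesis := time.Now().Add(-29 * time.Second) // hypothetical genesis, 29s ago (mid slot 2)
	secs := secondsIntoSlot(genesis, 2, time.Now())
	fmt.Println(secs, shouldOverride(3, 2, false, secs))
}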

View File

@@ -1,224 +0,0 @@
package blockchain
import (
"context"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestService_isNewProposer(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, false, service.isNewProposer(service.CurrentSlot()+1))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
require.Equal(t, true, service.isNewProposer(service.CurrentSlot()+1))
}
func TestService_isNewHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, true, service.isNewHead([32]byte{}))
service.head = &head{root: [32]byte{1}}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{1}))
// Nil head should use origin root
service.head = nil
service.originBlockRoot = [32]byte{3}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{3}))
}
func TestService_getHeadStateAndBlock(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
_, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
require.ErrorContains(t, "block does not exist", err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
st, _ := util.DeterministicGenesisState(t, 1)
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
require.NoError(t, err)
require.DeepEqual(t, st.ToProto(), gotState.ToProto())
gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
require.NoError(t, err)
require.DeepEqual(t, blk, gotBlk)
}
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
require.NoError(t, err)
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
// Block in Cache
b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
b = util.NewBeaconBlock()
b.Block.Slot = 3
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r1, err = b.Block.HashTreeRoot()
require.NoError(t, err)
st, _ = util.DeterministicGenesisState(t, 1)
service.head = &head{
root: r1,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
require.Equal(t, true, has)
require.Equal(t, primitives.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
require.NoError(t, err)
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{}
require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, sb))
r, err := sb.Block().HashTreeRoot()
require.NoError(t, err)
// Set head to be the same but proposing next slot
service.head.root = r
service.head.block = sb
service.head.state = st
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
require.NoError(t, err)
}
func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.Equal(t, primitives.Slot(2), service.CurrentSlot())
require.Equal(t, true, service.shouldOverrideFCU(headRoot, 2))
require.LogsDoNotContain(t, hook, "12 seconds")
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 2))
require.LogsContain(t, hook, "12 seconds")
head, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, headRoot, head)
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
require.LogsDoNotContain(t, hook, "10 seconds")
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
require.LogsContain(t, hook, "10 seconds")
}
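The tests above also lean on the proposer slot-index cache to remember which validator proposes an upcoming slot and which payload ID the engine handed back for it. A minimal sketch of that slot-keyed cache shape follows (illustrative only; the real cache is additionally keyed by head root and carries more bookkeeping):

package main

import (
	"fmt"
	"sync"
)

// payloadInfo pairs a proposer index with the payload ID returned by the engine.
type payloadInfo struct {
	proposer  uint64
	payloadID [8]byte
}

// proposerPayloadCache maps an upcoming slot to its proposer and payload ID.
type proposerPayloadCache struct {
	mu      sync.RWMutex
	entries map[uint64]payloadInfo
}

func newProposerPayloadCache() *proposerPayloadCache {
	return &proposerPayloadCache{entries: make(map[uint64]payloadInfo)}
}

func (c *proposerPayloadCache) set(slot, proposer uint64, id [8]byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[slot] = payloadInfo{proposer: proposer, payloadID: id}
}

func (c *proposerPayloadCache) get(slot uint64) (uint64, [8]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.entries[slot]
	return e.proposer, e.payloadID, ok
}

func main() {
	c := newProposerPayloadCache()
	c.set(2, 1, [8]byte{1})
	proposer, id, ok := c.get(2)
	fmt.Println(proposer, id, ok)
}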

View File

@@ -6,163 +6,164 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting the justified balances from cache.
// This function is only used in spec tests; it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
return errors.Wrap(err, "could not update head")
// This defines the current chain service's view of head.
type head struct {
slot types.Slot // current head slot.
root [32]byte // current head root.
block block.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// Determines the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
defer span.End()
// To get the proper head update, a node first checks whether its best justified
// checkpoint can become justified. This is designed to prevent bounce attacks and
// ensure the head gets its best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
headBlock, err := s.getBlock(ctx, headRoot)
// Get head from the fork choice service.
f := s.finalizedCheckpt
j := s.justifiedCheckpt
// To get head before the first justified epoch, the fork choice will start with genesis root
// instead of zero hashes.
headStartRoot := bytesutil.ToBytes32(j.Root)
if headStartRoot == params.BeaconConfig().ZeroHash {
headStartRoot = s.genesisRoot
}
// In order to process the head, the fork choice store requires justified info.
// If the fork choice store is missing justified block info, a node should
// re-initialize the fork choice store using the latest justified info.
// This recovers from a fatal condition and should not happen at run time.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
if err != nil {
return err
}
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
return err
}
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
return s.saveHead(ctx, headRoot, headBlock, headState)
}
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
// Save head to the local service cache.
return s.saveHead(ctx, headRoot)
}
// This saves head info to the local service cache; it also saves the
// new head root to the DB.
// The caller of this method MUST acquire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
if !s.isNewHead(newHeadRoot) {
return nil
}
if err := blocks.BeaconBlockIsNil(headBlock); err != nil {
r, err := s.HeadRoot(ctx)
if err != nil {
return err
}
if headState == nil || headState.IsNil() {
return errors.New("cannot save nil head state")
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
// If the head state is not available, just return nil.
// There's nothing to cache
if !s.cfg.BeaconDB.HasStateSummary(ctx, newHeadRoot) {
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
return nil
}
s.headLock.RLock()
oldHeadBlock, err := s.headBlock()
// Get the new head block from DB.
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
s.headLock.RUnlock()
return errors.Wrap(err, "could not get old head block")
return err
}
oldStateRoot := oldHeadBlock.Block().StateRoot()
s.headLock.RUnlock()
if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
return err
}
// Get the new head state from cached state or DB.
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil || newHeadState.IsNil() {
return errors.New("cannot save nil head state")
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
r, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
}
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
newHeadSlot := newHeadBlock.Block().Slot()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
newStateRoot := newHeadBlock.Block().StateRoot()
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"newRoot": fmt.Sprintf("%#x", newHeadRoot),
"newWeight": newWeight,
"oldSlot": fmt.Sprintf("%d", headSlot),
"oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
"oldWeight": oldWeight,
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": dis,
"depth": dep,
}).Info("Chain reorg occurred")
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
NewHeadState: newStateRoot[:],
Epoch: slots.ToEpoch(newHeadSlot),
ExecutionOptimistic: isOptimistic,
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
},
})
if err := s.saveOrphanedOperations(ctx, oldHeadRoot, newHeadRoot); err != nil {
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
return err
}
reorgCount.Inc()
}
// Cache the new head info.
if err := s.setHead(newHeadRoot, headBlock, headState); err != nil {
return errors.Wrap(err, "could not set head")
}
s.setHead(headRoot, newHeadBlock, newHeadState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot[:], newHeadRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -173,8 +174,8 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update the canonical root mapping. It does not save the head block
// root in the DB. With the inception of the initial-sync-cache-state flag, it uses the finalized
// checkpoint as an anchor to resume sync; therefore the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := helpers.BeaconBlockIsNil(b); err != nil {
return err
}
cachedHeadRoot, err := s.HeadRoot(ctx)
@@ -185,61 +186,44 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
return nil
}
bCp, err := b.Copy()
if err != nil {
return err
}
if err := s.setHeadInitialSync(r, bCp, hs); err != nil {
return errors.Wrap(err, "could not set head")
}
s.setHeadInitialSync(r, b.Copy(), hs)
return nil
}
// This sets the head view object, which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHead(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block and state.
bCp, err := block.Copy()
if err != nil {
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
block: block.Copy(),
state: state.Copy(),
}
return nil
}
// This sets the head view object, which is used to track the head slot, root, block and state. The method
// assumes that the state being passed into it will not be modified by any other
// caller that holds a reference to the state.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHeadInitialSync(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block only.
bCp, err := block.Copy()
if err != nil {
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
block: block.Copy(),
state: state,
}
return nil
}
// This returns the head slot.
// This is a lock free version.
func (s *Service) headSlot() primitives.Slot {
if s.head == nil || s.head.block == nil || s.head.block.Block() == nil {
return 0
}
return s.head.block.Block().Slot()
func (s *Service) headSlot() types.Slot {
return s.head.slot
}
// This returns the head root.
@@ -256,7 +240,7 @@ func (s *Service) headRoot() [32]byte {
// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) headBlock() block.SignedBeaconBlock {
return s.head.block.Copy()
}
@@ -270,55 +254,88 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
return s.head.state.Copy()
}
// This returns a read only version of the head state.
// It does not perform a copy of the head state.
// This returns the genesis validator root of the head state.
// This is a lock free version.
func (s *Service) headStateReadOnly(ctx context.Context) state.ReadOnlyBeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
defer span.End()
return s.head.state
}
// This returns the genesis validators root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorsRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
func (s *Service) headGenesisValidatorRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// This returns the validator referenced by the provided index in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorAtIndex(index primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
func (s *Service) headValidatorAtIndex(index types.ValidatorIndex) (state.ReadOnlyValidator, error) {
return s.head.state.ValidatorAtIndexReadOnly(index)
}
// This returns the validator index referenced by the provided pubkey in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
return s.head.state.ValidatorIndexByPubkey(pubKey)
}
// Returns true if head state exists.
// This is the lock free version.
func (s *Service) hasHeadState() bool {
return s.head != nil && s.head.state != nil
}
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
var justifiedState state.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}
}
if justifiedState == nil || justifiedState.IsNil() {
return errors.New("justified state can't be nil")
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}); err != nil {
return err
}
s.justifiedBalancesLock.Lock()
defer s.justifiedBalancesLock.Unlock()
s.justifiedBalances = justifiedBalances
return nil
}
func (s *Service) getJustifiedBalances() []uint64 {
s.justifiedBalancesLock.RLock()
defer s.justifiedBalancesLock.RUnlock()
return s.justifiedBalances
}
// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
ctx context.Context,
newHeadSlot primitives.Slot,
newHeadSlot types.Slot,
newHeadState state.BeaconState,
newHeadStateRoot,
newHeadRoot []byte,
) error {
previousDutyDependentRoot := s.originBlockRoot[:]
currentDutyDependentRoot := s.originBlockRoot[:]
previousDutyDependentRoot := s.genesisRoot[:]
currentDutyDependentRoot := s.genesisRoot[:]
var previousDutyEpoch primitives.Epoch
var previousDutyEpoch types.Epoch
currentDutyEpoch := slots.ToEpoch(newHeadSlot)
if currentDutyEpoch > 0 {
previousDutyEpoch = currentDutyEpoch.Sub(1)
@@ -343,10 +360,6 @@ func (s *Service) notifyNewHeadEvent(
return errors.Wrap(err, "could not get duty dependent root")
}
}
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.NewHead,
Data: &ethpbv1.EventHead{
@@ -356,77 +369,44 @@ func (s *Service) notifyNewHeadEvent(
EpochTransition: slots.IsEpochStart(newHeadSlot),
PreviousDutyDependentRoot: previousDutyDependentRoot,
CurrentDutyDependentRoot: currentDutyDependentRoot,
ExecutionOptimistic: isOptimistic,
},
})
return nil
}
// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are more than one epoch old as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
// This saves the attestations inside the beacon block with respect to root `orphanedRoot` back into the
// attestation pool. It also filters out the attestations that are one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
if !features.Get().CorrectlyInsertOrphanedAtts {
return nil
case err != nil:
}
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
if err != nil {
return err
}
for orphanedRoot != commonAncestorRoot {
if ctx.Err() != nil {
return ctx.Err()
}
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
// If the block is an epoch older, break out of the loop since we can't include its atts anyway.
// This prevents being stuck in this loop longer than necessary.
if orphanedBlk.Block().Slot()+params.BeaconConfig().SlotsPerEpoch <= s.CurrentSlot() {
break
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// If the attestation is one epoch older, it wouldn't be useful to save it.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
log.WithError(err).Error("Could not insert reorg attester slashing")
}
}
for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
log.WithError(err).Error("Could not insert reorg proposer slashing")
}
}
for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
s.cfg.ExitPool.InsertVoluntaryExit(v)
}
if orphanedBlk.Version() >= version.Capella {
changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, c := range changes {
s.cfg.BLSToExecPool.InsertBLSToExecChange(c)
}
}
parentRoot := orphanedBlk.Block().ParentRoot()
orphanedRoot = bytesutil.ToBytes32(parentRoot[:])
if orphanedBlk == nil || orphanedBlk.IsNil() {
return errors.New("orphaned block can't be nil")
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// Skip the attestation if it is one epoch older.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
return nil
}
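The newer saveOrphanedOperations above walks from the orphaned head back to the common ancestor with the new head, re-inserting operations that are still useful. The standalone sketch below illustrates just the attestation part of that walk with assumed types (not the Prysm API): stop once a block is a full epoch behind, and skip attestations that are already an epoch old.

package main

import "fmt"

const slotsPerEpoch = 32

type attestation struct{ slot uint64 }

type block struct {
	slot   uint64
	parent [32]byte
	atts   []attestation
}

type chain struct {
	blocks      map[[32]byte]block
	currentSlot uint64
	pool        []attestation
}

// saveOrphanedAtts walks from the orphaned root back to the common ancestor,
// re-inserting attestations that are still recent enough to be includable.
func (c *chain) saveOrphanedAtts(orphaned, commonAncestor [32]byte) error {
	for orphaned != commonAncestor {
		blk, ok := c.blocks[orphaned]
		if !ok {
			return fmt.Errorf("unknown block %#x", orphaned)
		}
		// A block a full epoch behind the current slot cannot contribute
		// includable attestations, so stop walking.
		if blk.slot+slotsPerEpoch <= c.currentSlot {
			break
		}
		for _, a := range blk.atts {
			// Skip attestations that are already an epoch old.
			if a.slot+slotsPerEpoch < c.currentSlot {
				continue
			}
			c.pool = append(c.pool, a)
		}
		orphaned = blk.parent
	}
	return nil
}

func main() {
	c := &chain{
		blocks: map[[32]byte]block{
			{'b'}: {slot: 64, parent: [32]byte{'a'}, atts: []attestation{{slot: 63}, {slot: 20}}},
			{'a'}: {slot: 63, parent: [32]byte{'g'}, atts: []attestation{{slot: 62}}},
		},
		currentSlot: 65,
	}
	if err := c.saveOrphanedAtts([32]byte{'b'}, [32]byte{'g'}); err != nil {
		fmt.Println(err)
	}
	fmt.Println(len(c.pool)) // 2: the stale slot-20 attestation is filtered out
}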

View File

@@ -5,17 +5,17 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)
// Initialize the state cache for sync committees.
@@ -24,30 +24,30 @@ var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return the caller's sync committee indices and public keys with respect to the current head state.
type HeadSyncCommitteeFetcher interface {
HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error)
HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
}
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
// The head sync committee domain functions return the caller's domain data with respect to the slot and head state.
type HeadDomainFetcher interface {
HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
}
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
}
// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
}
// HeadSyncContributionProofDomain returns the head sync contribution proof domain using the current head state advanced up to `slot`.
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
}
@@ -61,7 +61,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
// rather than for the range
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
nextSlotEpoch := slots.ToEpoch(slot + 1)
currentEpoch := slots.ToEpoch(slot)
@@ -79,7 +79,7 @@ func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index primitives
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -89,7 +89,7 @@ func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index pri
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -99,7 +99,7 @@ func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primit
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error) {
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -125,17 +125,17 @@ func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.
}
// returns calculated domain using input `domain` and `slot`.
func (s *Service) domainWithHeadState(ctx context.Context, slot primitives.Slot, domain [4]byte) ([]byte, error) {
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorsRoot())
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
}
// returns the head state advanced up to `slot`. It first checks the sync committee head state cache using `slot` as the key.
// On a cache miss, it processes the head state up to `slot` and fills the cache with `slot` as the key.
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
var headState state.BeaconState
var err error
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
@@ -157,9 +157,11 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
if err != nil {
return nil, err
if slot > headState.Slot() {
headState, err = transition.ProcessSlots(ctx, headState, slot)
if err != nil {
return nil, err
}
}
syncHeadStateMiss.Inc()
err = syncCommitteeHeadStateCache.Put(slot, headState)
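For readers skimming the hunk above: both sides implement the same cache-then-advance pattern, and the only difference is whether the "advance forward only" guard lives in the caller or inside the v4 helper ProcessSlotsIfPossible. A minimal sketch of that pattern, using hypothetical beaconState, getCached, putCached and processSlots stand-ins rather than Prysm's real types:

package statesketch

import (
    "context"
    "fmt"
)

// beaconState is a stand-in for Prysm's state.BeaconState interface.
type beaconState interface {
    Slot() uint64
    Copy() beaconState
}

// headStateAdvancedTo returns the head state advanced up to `slot`, consulting a
// per-slot cache first and replaying slots only when the target is ahead.
func headStateAdvancedTo(
    ctx context.Context,
    head beaconState,
    slot uint64,
    getCached func(uint64) beaconState,
    putCached func(uint64, beaconState),
    processSlots func(context.Context, beaconState, uint64) (beaconState, error),
) (beaconState, error) {
    if cached := getCached(slot); cached != nil {
        return cached, nil // cache hit: no slot replay needed
    }
    st := head.Copy()
    if slot > st.Slot() { // only ever advance forward
        var err error
        st, err = processSlots(ctx, st, slot)
        if err != nil {
            return nil, fmt.Errorf("process slots: %w", err)
        }
    }
    putCached(slot, st)
    return st, nil
}

Guarding on slot > st.Slot() keeps the replay strictly forward; asking the transition to process an earlier slot would be an error.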

View File

@@ -4,15 +4,53 @@ import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
_, err := c.headCurrentSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.headNextSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(0), types.CommitteeIndex(0))
require.ErrorContains(t, "nil state", err)
}
func TestService_HeadDomainFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
_, err := c.HeadSyncCommitteeDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
_, err = c.HeadSyncContributionProofDomain(context.Background(), types.Slot(0))
require.ErrorContains(t, "nil state", err)
}
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
@@ -20,18 +58,18 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
// Current period
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// Current period, where the slot is 2 slots before crossing EPOCHS_PER_SYNC_COMMITTEE_PERIOD.
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
require.DeepEqual(t, a, b)
// Next period, where the slot is 1 slot before crossing EPOCHS_PER_SYNC_COMMITTEE_PERIOD.
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
require.DeepNotEqual(t, a, b)
}
@@ -43,7 +81,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
@@ -57,7 +95,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. The validator should get indices.
@@ -71,7 +109,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
require.NoError(t, err)
// Any subcommittee should match the subcommittee size.
@@ -84,7 +122,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
@@ -98,7 +136,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
@@ -112,7 +150,7 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
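All three domain tests above reduce to the same spec-level computation: a 32-byte domain is the 4-byte domain type followed by the first 28 bytes of the fork data root. A hedged sketch of that layout, with sha256 over a raw concatenation standing in for the real SSZ hash_tree_root of the ForkData container:

package domainsketch

import "crypto/sha256"

// computeDomain mirrors the spec layout: domain = domain_type || fork_data_root[:28].
// NOTE: the real fork data root is the SSZ hash_tree_root of (fork_version, genesis_validators_root);
// sha256 over the raw concatenation below is only a placeholder for illustration.
func computeDomain(domainType, forkVersion [4]byte, genesisValidatorsRoot [32]byte) [32]byte {
    forkDataRoot := sha256.Sum256(append(forkVersion[:], genesisValidatorsRoot[:]...))
    var domain [32]byte
    copy(domain[:4], domainType[:])
    copy(domain[4:], forkDataRoot[:28])
    return domain
}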

View File

@@ -3,24 +3,21 @@ package blockchain
import (
"bytes"
"context"
"sort"
"testing"
"time"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -29,12 +26,10 @@ func TestSaveHead_Same(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
r := [32]byte{'A'}
service.head = &head{root: r}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
service.head = &head{slot: 0, root: r}
require.NoError(t, service.saveHead(context.Background(), r))
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
@@ -43,15 +38,15 @@ func TestSaveHead_Different(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
util.NewBeaconBlock()
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
}
@@ -60,34 +55,23 @@ func TestSaveHead_Different(t *testing.T) {
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
headBlock, err := service.headBlock()
require.NoError(t, err)
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
}
func TestSaveHead_Different_Reorg(t *testing.T) {
@@ -96,58 +80,72 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
util.NewBeaconBlock(),
)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
}
reorgChainParent := [32]byte{'B'}
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
}
headBlock, err := service.headBlock()
require.NoError(t, err)
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
require.LogsContain(t, hook, "distance=1")
require.LogsContain(t, hook, "depth=1")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
state, _ := util.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
b := util.NewBeaconBlock()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
}
func Test_notifyNewHeadEvent(t *testing.T) {
@@ -158,11 +156,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
cfg: &config{
StateNotifier: notifier,
},
originBlockRoot: [32]byte{1},
genesisRoot: [32]byte{1},
}
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err := srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -174,8 +172,8 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: false,
PreviousDutyDependentRoot: srv.originBlockRoot[:],
CurrentDutyDependentRoot: srv.originBlockRoot[:],
PreviousDutyDependentRoot: srv.genesisRoot[:],
CurrentDutyDependentRoot: srv.genesisRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
})
@@ -187,7 +185,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
cfg: &config{
StateNotifier: notifier,
},
originBlockRoot: genesisRoot,
genesisRoot: genesisRoot,
}
epoch1Start, err := slots.EpochStart(1)
require.NoError(t, err)
@@ -197,7 +195,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -216,400 +214,54 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
}
func TestRetrieveHead_ReadOnly(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
service.head = &head{
root: oldRoot,
block: oldBlock,
}
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
rOnlyState, err := service.HeadStateReadOnly(ctx)
require.NoError(t, err)
assert.Equal(t, rOnlyState, service.head.state, "Head is not the same object")
}
func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyInsertOrphanedAtts: true,
})
require.DeepEqual(t, wantAtts, atts)
}
defer resetCfg()
func TestSaveOrphanedOps(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.genesisTime = time.Now()
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
service.head = &head{state: st}
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkConfig.NumProposerSlashings = 1
blkConfig.NumAttesterSlashings = 1
blkConfig.NumVoluntaryExits = 1
blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
exits, err := service.cfg.ExitPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(exits))
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepSSZEqual(t, atts, savedAtts)
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.cfg.BLSToExecPool = blstoexec.NewPool()
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisStateCapella(t, 64)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkG, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 1)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig.NumBLSChanges = 10
blk1, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 2)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig.NumBLSChanges = 15
blk2, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlockCapella()
blkConfig.NumBLSChanges = 0
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
pending, err := service.cfg.BLSToExecPool.PendingBLSToExecChanges()
require.NoError(t, err)
require.Equal(t, 15, len(pending))
}
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyInsertOrphanedAtts: true,
})
require.DeepEqual(t, wantAtts, atts)
}
defer resetCfg()
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestUpdateHead_noSavedChanges(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcp := &ethpb.Checkpoint{
Root: bellatrixBlkRoot[:],
Epoch: 0,
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx)
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepNotSSZEqual(t, atts, savedAtts)
}
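The orphaned-operations tests above rely on one idea: when the head reorgs, the blocks between the old head and the common ancestor are walked parent-by-parent and their attestations (and, on the v4 side, slashings, exits and BLS changes) are put back into the pools so the new chain can include them. A minimal sketch of that walk, with a hypothetical blockLite type and blockByRoot/pool callbacks in place of Prysm's real interfaces:

package reorgsketch

import "errors"

// attestation and blockLite are placeholders for the real ethpb types.
type attestation struct{ Slot uint64 }

type blockLite struct {
    Root       [32]byte
    ParentRoot [32]byte
    Atts       []attestation
}

// reinsertOrphanedAtts walks from orphanedRoot back to commonAncestor and hands
// every orphaned block's attestations to pool for re-inclusion.
func reinsertOrphanedAtts(
    orphanedRoot, commonAncestor [32]byte,
    blockByRoot func([32]byte) (*blockLite, bool),
    pool func(attestation),
) error {
    for r := orphanedRoot; r != commonAncestor; {
        blk, ok := blockByRoot(r)
        if !ok {
            return errors.New("orphaned block not found")
        }
        for _, att := range blk.Atts {
            pool(att)
        }
        r = blk.ParentRoot
    }
    return nil
}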

View File

@@ -0,0 +1,99 @@
package blockchain
import (
"encoding/hex"
"fmt"
"net/http"
"github.com/emicklei/dot"
"github.com/prysmaticlabs/prysm/config/params"
)
const template = `<html>
<head>
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/viz.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/full.render.js"></script>
</head>
<body>
<script type="application/javascript">
var graph = ` + "`%s`;" + `
var viz = new Viz();
viz.renderSVGElement(graph) // reading the graph.
.then(function(element) {
document.body.appendChild(element); // appends to document.
})
.catch(error => {
// Create a new Viz instance (@see Caveats page for more info)
viz = new Viz();
// Possibly display the error
console.error(error);
});
</script>
</body>
</html>`
// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
headState, err := s.HeadState(r.Context())
if err != nil {
log.WithError(err).Error("Could not get head state")
return
}
if headState == nil || headState.IsNil() {
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
log.WithError(err).Error("Failed to render tree page")
}
return
}
nodes := s.cfg.ForkChoiceStore.Nodes()
graph := dot.NewGraph(dot.Directed)
graph.Attr("rankdir", "RL")
graph.Attr("labeljust", "l")
dotNodes := make([]*dot.Node, len(nodes))
avgBalance := uint64(averageBalance(headState.Balances()))
for i := len(nodes) - 1; i >= 0; i-- {
// Construct label for each node.
slot := fmt.Sprintf("%d", nodes[i].Slot())
weight := fmt.Sprintf("%d", nodes[i].Weight()/1e9) // Convert unit Gwei to unit ETH.
votes := fmt.Sprintf("%d", nodes[i].Weight()/1e9/avgBalance)
index := fmt.Sprintf("%d", i)
g := nodes[i].Graffiti()
graffiti := hex.EncodeToString(g[:8])
label := "slot: " + slot + "\n votes: " + votes + "\n weight: " + weight + "\n graffiti: " + graffiti
var dotN dot.Node
if nodes[i].Parent() != ^uint64(0) {
dotN = graph.Node(index).Box().Attr("label", label)
}
if nodes[i].Slot() == s.HeadSlot() &&
nodes[i].BestDescendant() == ^uint64(0) &&
nodes[i].Parent() != ^uint64(0) {
dotN = dotN.Attr("color", "green")
}
dotNodes[i] = &dotN
}
for i := len(nodes) - 1; i >= 0; i-- {
if nodes[i].Parent() != ^uint64(0) && nodes[i].Parent() < uint64(len(dotNodes)) {
graph.Edge(*dotNodes[i], *dotNodes[nodes[i].Parent()])
}
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
if _, err := fmt.Fprintf(w, template, graph.String()); err != nil {
log.WithError(err).Error("Failed to render p2p info page")
}
}
func averageBalance(balances []uint64) float64 {
total := uint64(0)
for i := 0; i < len(balances); i++ {
total += balances[i]
}
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}
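For readers unfamiliar with the emicklei/dot package used by TreeHandler, the graph construction above boils down to a handful of calls: create a directed graph, add boxed nodes with labels, draw edges from each block to its parent, then render the DOT source. A trimmed, standalone sketch (the node names and labels are made up):

package main

import (
    "fmt"

    "github.com/emicklei/dot"
)

func main() {
    g := dot.NewGraph(dot.Directed)
    g.Attr("rankdir", "RL") // lay the chain out right-to-left, newest block first

    parent := g.Node("0").Box().Attr("label", "slot: 0")
    child := g.Node("1").Box().Attr("label", "slot: 1")
    g.Edge(child, parent) // edges point from a block to its parent, as in TreeHandler

    fmt.Println(g.String()) // DOT source, renderable by viz.js as in the template above
}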

View File

@@ -0,0 +1,50 @@
package blockchain
import (
"context"
"net/http"
"net/http/httptest"
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestService_TreeHandler(t *testing.T) {
req, err := http.NewRequest("GET", "/tree", nil)
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)
rr := httptest.NewRecorder()
handler := http.HandlerFunc(s.TreeHandler)
handler.ServeHTTP(rr, req)
assert.Equal(t, http.StatusOK, rr.Code)
}

View File

@@ -1,27 +1,14 @@
package blockchain
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)
// This saves a beacon block to the initial sync blocks cache. It rate limits how many blocks
// the cache keeps in memory (2 epochs worth of blocks) and saves them to DB when it hits this limit.
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock) error {
// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b block.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
numBlocks := len(s.initSyncBlocks)
s.initSyncBlocksLock.Unlock()
if uint64(numBlocks) > initialSyncBlockCacheSize {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
return nil
}
// This checks if a beacon block exists in the initial sync blocks cache using the root
@@ -33,42 +20,22 @@ func (s *Service) hasInitSyncBlock(r [32]byte) bool {
return ok
}
// Returns true if a block for root `r` exists in the initial sync blocks cache or the DB.
func (s *Service) hasBlockInInitSyncOrDB(ctx context.Context, r [32]byte) bool {
if s.hasInitSyncBlock(r) {
return true
}
return s.cfg.BeaconDB.HasBlock(ctx, r)
}
// Returns block for a given root `r` from either the initial sync blocks cache or the DB.
// Error is returned if the block is not found in either cache or DB.
func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
// This retrieves a beacon block from the initial sync blocks cache using the root of
// the block.
func (s *Service) getInitSyncBlock(r [32]byte) block.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
// Check cache first because it's faster.
b, ok := s.initSyncBlocks[r]
s.initSyncBlocksLock.RUnlock()
var err error
if !ok {
b, err = s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block from db")
}
}
if err := blocks.BeaconBlockIsNil(b); err != nil {
return nil, errBlockNotFoundInCacheOrDB
}
return b, nil
defer s.initSyncBlocksLock.RUnlock()
b := s.initSyncBlocks[r]
return b
}
// This retrieves all the beacon blocks from the initial sync blocks cache, the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []interfaces.ReadOnlySignedBeaconBlock {
func (s *Service) getInitSyncBlocks() []block.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
blks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, len(s.initSyncBlocks))
blks := make([]block.SignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
@@ -79,5 +46,5 @@ func (s *Service) getInitSyncBlocks() []interfaces.ReadOnlySignedBeaconBlock {
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
s.initSyncBlocks = make(map[[32]byte]block.SignedBeaconBlock)
}
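The v4-side comment above describes the newer behaviour: the init-sync cache is capped at roughly two epochs of blocks and is flushed to the database when it grows past that limit. A hedged sketch of that save-and-flush shape, with a placeholder signedBlock type and a flush callback standing in for the real SaveBlocks call:

package cachesketch

import "sync"

// signedBlock is a placeholder for Prysm's signed beacon block interface.
type signedBlock interface{}

// blockCache caches blocks by root during initial sync and flushes them to a
// backing store (e.g. the beacon DB) once it holds more than maxSize entries.
type blockCache struct {
    mu      sync.Mutex
    blocks  map[[32]byte]signedBlock
    maxSize int
    flush   func([]signedBlock) error
}

func (c *blockCache) save(root [32]byte, b signedBlock) error {
    c.mu.Lock()
    c.blocks[root] = b
    over := len(c.blocks) > c.maxSize
    var batch []signedBlock
    if over {
        // Over the limit: hand everything to flush and start a fresh map.
        batch = make([]signedBlock, 0, len(c.blocks))
        for _, blk := range c.blocks {
            batch = append(batch, blk)
        }
        c.blocks = make(map[[32]byte]signedBlock)
    }
    c.mu.Unlock()
    if !over {
        return nil
    }
    return c.flush(batch)
}

Building the batch under the lock but calling flush outside it avoids holding the mutex across a database write.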

View File

@@ -1,68 +0,0 @@
package blockchain
import (
"context"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestService_getBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b2 := util.NewBeaconBlock()
b2.Block.Slot = 100
r2, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
// block not found
_, err = s.getBlock(ctx, [32]byte{})
require.ErrorIs(t, err, errBlockNotFoundInCacheOrDB)
// block in cache
b, err := blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
got, err := s.getBlock(ctx, r1)
require.NoError(t, err)
require.DeepEqual(t, b, got)
// block in db
b = util.SaveBlock(t, ctx, s.cfg.BeaconDB, b2)
got, err = s.getBlock(ctx, r2)
require.NoError(t, err)
require.DeepEqual(t, b, got)
}
func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b2 := util.NewBeaconBlock()
b2.Block.Slot = 100
r2, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
// block not found
require.Equal(t, false, s.hasBlockInInitSyncOrDB(ctx, [32]byte{}))
// block in cache
b, err := blocks.NewSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))
// block in db
util.SaveBlock(t, ctx, s.cfg.BeaconDB, b2)
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r2))
}

View File

@@ -1,12 +1,12 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/config/params"
)
func init() {
// Override network name so that hardcoded genesis files are not loaded.
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
}
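The same override pattern shows up inside individual tests elsewhere in this diff (see TestSaveOrphanedOps above): register a cleanup, copy the active config, tweak a field, and override it for the duration of the test. A minimal sketch; the test name and the ShardCommitteePeriod tweak are only illustrative:

package blockchain

import (
    "testing"

    "github.com/prysmaticlabs/prysm/config/params"
)

func TestWithShortShardCommitteePeriod(t *testing.T) {
    params.SetupTestConfigCleanup(t) // restores the global config when the test ends

    cfg := params.BeaconConfig()
    cfg.ShardCommitteePeriod = 0 // e.g. allow immediate voluntary exits in fixtures
    params.OverrideBeaconConfig(cfg)

    // ... test body that depends on the overridden config ...
}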

View File

@@ -5,23 +5,19 @@ import (
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logStateTransitionData(b block.BeaconBlock) {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -38,103 +34,34 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() >= version.Altair {
if b.Version() == version.Altair {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
if err == nil {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
}
log.Info("Finished applying state transition")
return nil
}
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
startTime, err := slots.ToTime(genesisTime, block.Slot())
if err != nil {
return err
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
"deposits": len(block.Body().Deposits()),
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
return nil
}
// logs payload related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil
}
payload, err := block.Body().Execution()
if err != nil {
return err
}
if payload.GasLimit() == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
fields := logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
fields["withdrawals"] = len(withdrawals)
changes, err := block.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
}
log.WithFields(fields).Debug("Synced new payload")
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
}).Info("Synced new block")
log.WithFields(logrus.Fields{
"slot": block.Slot,
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
}).Debug("Sync new block times")
return nil
}
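Both versions of the logging above repeatedly format 32-byte roots as a short 0x-prefixed hex prefix. A tiny helper capturing that idiom (hypothetical, not part of either side of the diff):

package logsketch

import (
    "encoding/hex"
    "fmt"
)

// shortRoot renders a 32-byte root as "0xdeadbeef..." using its first 4 bytes,
// matching the hex.EncodeToString(root[:])[:8] pattern in the log fields above.
func shortRoot(root [32]byte) string {
    return fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8])
}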

View File

@@ -3,107 +3,63 @@ package blockchain
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_logStateTransitionData(t *testing.T) {
payloadBlk := &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
SyncAggregate: &ethpb.SyncAggregate{},
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: []byte{1, 2, 3},
Transactions: [][]byte{{}, {}},
},
},
}
wrappedPayloadBlk, err := blocks.NewBeaconBlock(payloadBlk)
require.NoError(t, err)
tests := []struct {
name string
b func() interfaces.ReadOnlyBeaconBlock
b block.BeaconBlock
want string
}{
{name: "empty block body",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}),
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
},
{name: "has attestation",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}}),
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has deposit",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(
&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}}}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(
&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}}}}),
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}}),
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
},
{name: "has proposer slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}}),
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
},
{name: "has exit",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
},
{name: "has everything",
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}},
AttesterSlashings: []*ethpb.AttesterSlashing{{}},
ProposerSlashings: []*ethpb.ProposerSlashing{{}},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}})
require.NoError(t, err)
return wb
},
b: wrapper.WrappedPhase0BeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}},
AttesterSlashings: []*ethpb.AttesterSlashing{{}},
ProposerSlashings: []*ethpb.ProposerSlashing{{}},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}
for _, tt := range tests {
hook := logTest.NewGlobal()
t.Run(tt.name, func(t *testing.T) {
require.NoError(t, logStateTransitionData(tt.b()))
logStateTransitionData(tt.b)
require.LogsContain(t, hook, tt.want)
})
}
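The test above follows the usual logrus test-hook pattern: install a global hook, run the code under test, then assert on the captured entries (Prysm's require.LogsContain wraps that assertion). A minimal generic sketch of the same pattern, independent of Prysm's helpers; the package and test names here are arbitrary:

package example // in a *_test.go file

import (
	"testing"

	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestLogCapture(t *testing.T) {
	hook := logTest.NewGlobal() // captures entries emitted via the standard logger
	logrus.WithField("slot", 0).Info("Finished applying state transition")
	last := hook.LastEntry()
	if last == nil || last.Message != "Finished applying state transition" {
		t.Fatal("expected the log entry to be captured")
	}
}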

Some files were not shown because too many files have changed in this diff.