Compare commits

..

2 Commits

Author  SHA1        Message    Date
nisdas  a8572352e1  fix build  2023-05-17 13:34:20 +08:00
nisdas  1848d76796  fix        2023-05-17 13:03:08 +08:00
779 changed files with 13143 additions and 60808 deletions

View File

@@ -3,7 +3,6 @@ import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
@@ -15,7 +14,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false --define blst_modern=true
build --define blst_disabled=false
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -28,7 +27,30 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
build:llvm --incompatible_enable_cc_toolchain_resolution
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address,undefined
build:asan --copt -fno-sanitize=vptr,function
build:asan --linkopt -fno-sanitize=vptr,function
build:asan --copt -DADDRESS_SANITIZER=1
build:asan --copt -D__SANITIZE_ADDRESS__
build:asan --linkopt -ldl
build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
@@ -37,13 +59,9 @@ build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug=".*"
build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# Set bazel gotag
build --define gotags=bazel
# Abseil requires c++14 or greater.
build --cxxopt=-std=c++20
build --host_cxxopt=-std=c++20

View File

@@ -1 +1 @@
6.2.1
6.1.0

View File

@@ -3,6 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"

View File

@@ -1,6 +1,6 @@
# Contribution Guidelines
Note: The latest and most up-to-date documentation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Note: The latest and most up to date documenation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?
@@ -10,9 +10,9 @@ You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues)
**1. Set up Prysm following the instructions in README.md.**
**2. Fork the Prysm repo.**
**2. Fork the prysm repo.**
Sign in to your GitHub account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
Sign in to your Github account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
**3. Create a local clone of Prysm.**
@@ -23,7 +23,7 @@ $ git clone https://github.com/prysmaticlabs/prysm.git
$ cd $GOPATH/src/github.com/prysmaticlabs/prysm
```
**4. Link your local clone to the fork on your GitHub repo.**
**4. Link your local clone to the fork on your Github repo.**
```
$ git remote add myprysmrepo https://github.com/<your_github_user_name>/prysm.git
@@ -68,7 +68,7 @@ $ go test <file_you_are_working_on>
$ git add --all
```
This command stages all the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
This command stages all of the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
**11. Commit the file or files.**
@@ -96,7 +96,8 @@ If there are conflicts between your edits and those made by others since you sta
$ git status
```
Open those files one at a time, and you will see lines inserted by Git that identify the conflicts:
Open those files one at a time and you
will see lines inserted by Git that identify the conflicts:
```
<<<<<< HEAD
@@ -118,7 +119,7 @@ $ git push myrepo feature-in-progress-branch
**15. Check to be sure your fork of the Prysm repo contains your feature branch with the latest edits.**
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
Navigate to your fork of the repo on Github. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
**16. Create a pull request.**
@@ -150,7 +151,7 @@ pick hash fix a bug
pick hash add a feature
```
Replace the word pick with the word “squash” for every line but the first, so you end with ….
Replace the word pick with the word “squash” for every line but the first so you end with ….
```
pick hash do some work
@@ -177,7 +178,7 @@ We consider two types of contributions to our repo and categorize them as follow
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:
- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up GitHub issues to express interest in code to implement
- Opening up github issues to express interest in code to implement
- Opening up PRs referencing any open issue in the repo. PRs should include:
- Detailed context of what would be required for merge
- Tests that are consistent with how other tests are written in our implementation
@@ -187,12 +188,12 @@ Anyone can become a part-time contributor and help out on implementing Ethereum
### Core Contributors
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all the responsibilities of part-time contributors plus the majority of the following:
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
- Stay up to date on the latest beacon chain specification
- Monitor GitHub issues and PRs to make sure owner, labels, descriptions are correct
- Monitor github issues and PRs to make sure owner, labels, descriptions are correct
- Formulate independent ideas, suggest new work to do, point out improvements to existing approaches
- Participate in code review, ensure code quality is excellent, and ensure high code coverage
- Participate in code review, ensure code quality is excellent, and have ensure high code coverage
- Help with social media presence, write bi-weekly development update
- Represent Prysmatic Labs at events to help spread the word on scalability research and solutions

View File

@@ -17,34 +17,26 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
urls = [
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
],
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
)
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
zig_toolchains()
bazel_toolchain_dependencies()
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
register_toolchains(
"@zig_sdk//toolchain:linux_amd64_gnu.2.31",
"@zig_sdk//toolchain:linux_arm64_gnu.2.31",
# Hermetic cc toolchain is not yet supported on darwin. Sysroot needs to be provided.
# See https://github.com/uber/hermetic_cc_toolchain#osx-sysroot
# "@zig_sdk//toolchain:darwin_amd64",
# "@zig_sdk//toolchain:darwin_arm64",
# Windows builds are not supported yet.
# "@zig_sdk//toolchain:windows_amd64",
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
)
load("@prysm//tools/cross-toolchain:darwin_cc_hack.bzl", "configure_nonhermetic_darwin")
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
configure_nonhermetic_darwin()
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
@@ -172,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.6",
go_version = "1.20.3",
nogo = "@//:nogo",
)
@@ -213,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-beta.0"
consensus_spec_version = "v1.3.0"
bls_test_version = "v0.1.1"
@@ -229,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "b225137494dfe4ce833abf78690b908828c63399e8f34bef5f2e258fadbf363a",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -245,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4ad95a45a635f9c2071d9a75d6e37ac15c8a6bb35e0a31ac1f1c9f3ecd34e706",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -261,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5eeb9b7b0c882fe11ea14444cec44e9e77655f384d80b6743b1f0f82ec9a6599",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -276,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "939c1a79a0990831d91792de622ce2a410cfc77bce8c845b5ad18e05fed64bff",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -319,13 +311,11 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)
http_archive(
git_repository(
name = "com_google_protobuf",
sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
strip_prefix = "protobuf-23.3",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz",
],
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1617835118 -0700",
)
# Group the sources of the library so that CMake rule have access to it

View File

@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["headers.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api",
visibility = ["//visibility:public"],
)

View File

@@ -1,20 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -6,11 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -39,7 +39,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -6,12 +6,10 @@ import (
"path"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -20,8 +18,6 @@ import (
"golang.org/x/mod/semver"
)
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
@@ -78,40 +74,37 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err := b.Block().HashTreeRoot()
realBlockRoot, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", sr).
WithField("block_root", br).
Info("Downloaded checkpoint sync state and block.")
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
return &OriginData{
st: s,
b: b,
@@ -147,7 +140,7 @@ func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*We
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, base.ErrNotOK) {
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
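The DownloadFinalizedData hunk earlier in this file stops looking the checkpoint block up by slot and instead derives its root from the finalized state itself. A minimal sketch of that derivation, assuming the Prysm v4 state API visible in the hunk (the helper name is hypothetical):

```go
package beacon

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
)

// deriveFinalizedBlockRoot is a hypothetical helper mirroring the derivation
// in DownloadFinalizedData above. The block header stored in a state carries
// an empty state root (it is only filled in during the next slot's
// processing), so the root of the block that produced the state can be
// recovered by setting the header's state root to the state's hash tree root
// and then hashing the header.
func deriveFinalizedBlockRoot(ctx context.Context, s state.BeaconState) ([32]byte, error) {
	sr, err := s.HashTreeRoot(ctx)
	if err != nil {
		return [32]byte{}, err
	}
	header := s.LatestBlockHeader()
	header.StateRoot = sr[:]
	return header.HashTreeRoot()
}
```

The resulting root is what the new code passes to `client.GetBlock(ctx, IdFromRoot(br))` and later compares against the hash tree root of the downloaded block.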

View File

@@ -7,9 +7,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
@@ -66,7 +66,11 @@ func TestMarshalToEnvelope(t *testing.T) {
}
func TestFallbackVersionCheck(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
c := &Client{
hc: &http.Client{},
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
@@ -84,13 +88,12 @@ func TestFallbackVersionCheck(t *testing.T) {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
@@ -128,7 +131,6 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -168,41 +170,44 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
Epoch: epoch,
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -227,7 +232,6 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
@@ -262,39 +266,42 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -308,16 +315,21 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
@@ -401,7 +413,6 @@ func TestDownloadFinalizedData(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))
@@ -437,24 +448,29 @@ func TestDownloadFinalizedData(t *testing.T) {
ms, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)
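Each of the tests above swaps the client's transport for a function-backed `http.RoundTripper` so no real server is needed. A generic, self-contained sketch of that pattern (the `roundTripFunc` adapter here is illustrative, not the `testRT` helper the tests actually use):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// roundTripFunc adapts a plain function into an http.RoundTripper, which is
// how the tests above intercept requests without starting a server.
type roundTripFunc func(*http.Request) (*http.Response, error)

func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

func main() {
	hc := &http.Client{
		Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
			// Return a canned response keyed off the request path.
			return &http.Response{
				StatusCode: http.StatusOK,
				Header:     make(http.Header),
				Body:       io.NopCloser(strings.NewReader(`{"path":"` + req.URL.Path + `"}`)),
				Request:    req,
			}, nil
		}),
	}
	res, err := hc.Get("http://localhost:3500/eth/v1/node/version")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(body))
}
```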

View File

@@ -5,6 +5,8 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -12,8 +14,8 @@ import (
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
@@ -52,6 +54,8 @@ const (
IdFinalized StateOrBlockId = "finalized"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
@@ -81,22 +85,96 @@ func idTemplate(ts string) func(StateOrBlockId) string {
return f
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
*client.Client
hc *http.Client
baseURL *url.URL
}
// NewClient returns a new Client that includes functions for rest calls to Beacon API.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
return &Client{c}, nil
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
@@ -106,7 +184,7 @@ func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
b, err := c.get(ctx, blockPath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
@@ -121,7 +199,7 @@ var getBlockRootTpl = idTemplate(getBlockRootPath)
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.Get(ctx, rootPath)
b, err := c.get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
@@ -144,7 +222,7 @@ var getForkTpl = idTemplate(getForkForStatePath)
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.Get(ctx, getForkTpl(stateId))
body, err := c.get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
@@ -160,7 +238,7 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
body, err := c.get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
@@ -178,7 +256,7 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
body, err := c.get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
@@ -201,7 +279,7 @@ var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
@@ -213,7 +291,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.Get(ctx, getNodeVersionPath)
b, err := c.get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
@@ -240,7 +318,7 @@ func renderGetStatePath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
b, err := c.get(ctx, statePath, withSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
@@ -253,7 +331,7 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
// - finds the highest non-skipped block preceding the epoch
// - returns the htr of the found block and returns this + the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.Get(ctx, getWeakSubjectivityPath)
body, err := c.get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
@@ -284,7 +362,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
@@ -294,7 +372,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.Do(req)
resp, err := c.hc.Do(req)
if err != nil {
return err
}
@@ -323,7 +401,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.Get(ctx, changeBLStoExecutionPath)
body, err := c.get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
@@ -335,6 +413,23 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
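Taken together, the additions above make this package self-sufficient for REST access to a beacon node. A minimal usage sketch, assuming only the exported names introduced in this hunk (the endpoint address is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

func main() {
	ctx := context.Background()
	// NewClient accepts a full URL or a bare host:port (http is assumed);
	// WithTimeout sets the timeout on the wrapped http.Client.
	c, err := beacon.NewClient("localhost:3500", beacon.WithTimeout(30*time.Second))
	if err != nil {
		// e.g. ErrMalformedHostname when the port is missing.
		panic(err)
	}
	fmt.Println("beacon node:", c.NodeURL())

	// GetState and GetBlock return ssz-encoded bytes; named identifiers such
	// as IdFinalized and IdHead are exported by the package.
	stateSSZ, err := c.GetState(ctx, beacon.IdFinalized)
	if err != nil {
		panic(err)
	}
	fmt.Println("finalized state:", len(stateSSZ), "bytes")
}
```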
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`

View File

@@ -4,7 +4,6 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -18,17 +17,17 @@ func TestParseNodeVersion(t *testing.T) {
{
name: "empty string",
v: "",
err: client.ErrInvalidNodeVersion,
err: ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: client.ErrInvalidNodeVersion,
err: ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: client.ErrInvalidNodeVersion,
err: ErrInvalidNodeVersion,
},
{
name: "complete version",
@@ -92,7 +91,7 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname without port",
hostArg: "mydomain.org",
err: client.ErrMalformedHostname,
err: ErrMalformedHostname,
},
{
name: "hostname with port",
@@ -133,7 +132,7 @@ func TestValidHostname(t *testing.T) {
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
})
}
}

View File

@@ -0,0 +1,13 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
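These sentinels are layered so callers can branch on the broad failure class while still distinguishing 404s: `ErrNotFound` wraps `ErrNotOK`, and `non200Err` wraps whichever sentinel applies together with the request details. A small illustration, assuming `github.com/pkg/errors` v0.9+ (which supports standard-library unwrapping); the wrap message only imitates what `non200Err` produces:

```go
package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

func main() {
	// A 404 from the API surfaces as a wrap of ErrNotFound, which itself
	// wraps ErrNotOK, so one errors.Is check covers every non-2xx response.
	err := errors.Wrap(beacon.ErrNotFound, "code=404, url=/eth/v1/beacon/weak_subjectivity")
	fmt.Println(stderrors.Is(err, beacon.ErrNotFound)) // true
	fmt.Println(stderrors.Is(err, beacon.ErrNotOK))    // true: a 404 is still "not OK"
}
```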

View File

@@ -11,12 +11,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
@@ -40,7 +40,6 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -1,12 +1,15 @@
package builder
import (
"math/big"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -22,7 +25,6 @@ type SignedBid interface {
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error)
Value() []byte
Pubkey() []byte
Version() int
@@ -115,11 +117,6 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// BlindedBlobsBundle --
func (b builderBid) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
@@ -165,13 +162,12 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlindedBlobsBundle --
func (b builderBidCapella) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
}
// Version --
@@ -203,90 +199,3 @@ func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidDeneb struct {
p *ethpb.BuilderBidDeneb
}
// WrappedBuilderBidDeneb is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidDeneb(p *ethpb.BuilderBidDeneb) (Bid, error) {
w := builderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Version --
func (b builderBidDeneb) Version() int {
return version.Deneb
}
// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidDeneb) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidDeneb) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidDeneb) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlindedBlobsBundle --
func (b builderBidDeneb) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return b.p.BlindedBlobsBundle, nil
}
type signedBuilderBidDeneb struct {
p *ethpb.SignedBuilderBidDeneb
}
// WrappedSignedBuilderBidDeneb is a constructor which wraps a protobuf signed bit into an interface.
func WrappedSignedBuilderBidDeneb(p *ethpb.SignedBuilderBidDeneb) (SignedBid, error) {
w := signedBuilderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidDeneb) Message() (Bid, error) {
return WrappedBuilderBidDeneb(b.p.Message)
}
// Signature --
func (b signedBuilderBidDeneb) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidDeneb) Version() int {
return version.Deneb
}
// IsNil --
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}
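The Capella `Header()` above now parses the bid value, which arrives little-endian from the execution layer, by reversing it into big-endian before `big.Int` parsing and converting wei to gwei. A self-contained sketch of that conversion (the reverse helper mirrors what `bytesutil.ReverseByteOrder` does, and the value is made up):

```go
package main

import (
	"fmt"
	"math/big"
)

// reverseByteOrder returns a reversed copy of b, mirroring
// bytesutil.ReverseByteOrder in the code above.
func reverseByteOrder(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// 32-byte little-endian value as it arrives from the builder:
	// 1 gwei expressed in wei (1e9 wei).
	wei := new(big.Int).SetUint64(1_000_000_000)
	le := make([]byte, 32)
	copy(le, reverseByteOrder(wei.Bytes())) // little-endian, zero-padded

	// big.Int.SetBytes expects big-endian, so reverse before parsing.
	v := new(big.Int).SetBytes(reverseByteOrder(le))
	gwei := new(big.Int).Div(v, big.NewInt(1_000_000_000)) // rough stand-in for math.WeiToGwei
	fmt.Println(v, gwei)                                    // 1000000000 1
}
```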

View File

@@ -11,6 +11,7 @@ import (
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -19,7 +20,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
log "github.com/sirupsen/logrus"
@@ -36,6 +36,7 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -87,7 +88,7 @@ type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
Status(ctx context.Context) error
}
@@ -221,16 +222,6 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
@@ -285,118 +276,79 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if !sb.IsBlinded() {
return nil, nil, errNotBlinded
return nil, errNotBlinded
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, nil, errors.New("not a bellatrix payload")
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayload(p)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
return blocks.WrappedExecutionPayload(p)
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, nil, errors.New("not a capella payload")
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Deneb:
psb, err := sb.PbBlindedDenebBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{Block: psb, Blobs: blobs}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
}
ep := &ExecPayloadResponseDeneb{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
}
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
return nil, nil, errors.New("not a deneb payload")
}
p, blobBundle, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, blobBundle, nil
return blocks.WrappedExecutionPayloadCapella(p, 0)
default:
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}
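With this change `SubmitBlindedBlock` no longer takes a blob sidecar slice and returns just the execution payload. A sketch of a caller written against the `BuilderClient` interface shown above (the `unblind` wrapper and its package are hypothetical):

```go
package example

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
)

// unblind submits a signed blinded block to the builder and returns the full
// execution payload backing it. Sketch only: obtaining the client and the
// blinded block is outside this example.
func unblind(ctx context.Context, c builder.BuilderClient, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
	// The blob sidecar argument and the *v1.BlobsBundle return value were
	// removed by this change; only the payload comes back now.
	payload, err := c.SubmitBlindedBlock(ctx, sb)
	if err != nil {
		return nil, err // fails when the block is not blinded, among other cases
	}
	log.Printf("received payload for parent %#x", payload.ParentHash())
	return payload, nil
}
```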

View File

@@ -12,9 +12,7 @@ import (
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -127,6 +125,7 @@ func TestClient_GetHeader(t *testing.T) {
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -237,52 +236,6 @@ func TestClient_GetHeader(t *testing.T) {
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
bundle, err := bid.BlindedBlobsBundle()
require.NoError(t, err)
require.Equal(t, len(bundle.BlobRoots) <= fieldparams.MaxBlobsPerBlock && len(bundle.BlobRoots) > 0, true)
for i := range bundle.BlobRoots {
require.Equal(t, len(bundle.BlobRoots[i]) == fieldparams.RootLength, true)
}
require.Equal(t, len(bundle.KzgCommitments) > 0, true)
for i := range bundle.KzgCommitments {
require.Equal(t, len(bundle.KzgCommitments[i]) == 48, true)
}
require.Equal(t, len(bundle.Proofs) > 0, true)
for i := range bundle.Proofs {
require.Equal(t, len(bundle.Proofs[i]) == 48, true)
}
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -324,7 +277,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb, nil)
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
@@ -350,7 +303,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
require.NoError(t, err)
ep, _, err := c.SubmitBlindedBlock(ctx, sbb, nil)
ep, err := c.SubmitBlindedBlock(ctx, sbb)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
@@ -360,40 +313,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
sbb, err := blocks.NewSignedBeaconBlock(test.Block)
require.NoError(t, err)
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.Blobs)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
require.NotNil(t, blobBundle)
require.Equal(t, hexutil.Encode(blobBundle.Blobs[0]), hexutil.Encode(make([]byte, fieldparams.BlobLength)))
require.Equal(t, hexutil.Encode(blobBundle.KzgCommitments[0]), "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
require.Equal(t, hexutil.Encode(blobBundle.Proofs[0]), "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -411,13 +330,13 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, _, err = c.SubmitBlindedBlock(ctx, sbbb, nil)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb, nil)
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
require.ErrorIs(t, err, errNotBlinded)
})
}
@@ -707,168 +626,6 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
}
}
func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
return &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
Block: &eth.SignedBlindedBeaconBlockDeneb{
Block: &eth.BlindedBeaconBlockDeneb{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyDeneb{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlobGasUsed: 1,
ExcessBlobGas: 2,
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Blobs: []*eth.SignedBlindedBlobSidecar{
{
Message: &eth.BlindedBlobSidecar{
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Index: 0,
Slot: 1,
BlockParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ProposerIndex: 1,
BlobRoot: ezDecode(t, "0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"),
KzgCommitment: ezDecode(t, "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"),
KzgProof: ezDecode(t, "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
}
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)

View File

@@ -10,7 +10,6 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -41,8 +40,8 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
return nil, nil, nil
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
}
// Status --

File diff suppressed because it is too large

View File

@@ -14,7 +14,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -117,47 +116,6 @@ var testExampleHeaderResponseCapella = `{
}
}`
var testExampleHeaderResponseDeneb = `{
"version": "deneb",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"blob_gas_used": "1",
"excess_blob_gas": "2"
},
"blinded_blobs_bundle": {
"commitments": [
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
],
"proofs": [
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
],
"blob_roots": [
"0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"
]
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
var testExampleHeaderResponseUnknownVersion = `{
"version": "bad",
"data": {
@@ -560,51 +518,6 @@ var testExampleExecutionPayloadCapella = `{
}
}`
var testExampleExecutionPayloadDeneb = fmt.Sprintf(`{
"version": "deneb",
"data": {
"execution_payload":{
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions": [
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
],
"withdrawals": [
{
"index": "1",
"validator_index": "1",
"address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
"amount": "1"
}
],
"blob_gas_used": "2",
"excess_blob_gas": "3"
},
"blobs_bundle": {
"commitments": [
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
],
"proofs": [
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
],
"blobs": [
"%s"
]
}
}
}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
epr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
@@ -776,107 +689,6 @@ func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
assert.Equal(t, uint64(1), w.Amount.Uint64())
}
func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
epr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
cases := []struct {
expected string
actual string
name string
}{
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ParentHash),
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
actual: hexutil.Encode(epr.Data.ExecutionPayload.FeeRecipient),
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.StateRoot),
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ReceiptsRoot),
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
actual: hexutil.Encode(epr.Data.ExecutionPayload.LogsBloom),
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.PrevRandao),
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlockNumber),
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasLimit),
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasUsed),
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.Timestamp),
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ExtraData),
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BaseFeePerGas),
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.BlockHash),
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
},
{
expected: "2",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlobGasUsed),
name: "ExecPayloadResponse.ExecutionPayload.BlobGasUsed",
},
{
expected: "3",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessBlobGas),
name: "ExecPayloadResponse.ExecutionPayload.ExcessBlobGas",
},
}
for _, c := range cases {
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
}
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
require.Equal(t, txHash, hexutil.Encode(epr.Data.ExecutionPayload.Transactions[0]))
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Withdrawals))
w := epr.Data.ExecutionPayload.Withdrawals[0]
assert.Equal(t, uint64(1), w.Index.Uint64())
assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
assert.Equal(t, uint64(1), w.Amount.Uint64())
assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.BlobGasUsed))
assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessBlobGas))
}
func TestExecutionPayloadResponseToProto(t *testing.T) {
hr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
@@ -984,85 +796,6 @@ func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
}
func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
hr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), hr))
p, blobsBundle, err := hr.ToProto()
require.NoError(t, err)
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
require.NoError(t, err)
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err)
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
require.NoError(t, err)
txList := [][]byte{tx}
address, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943")
require.NoError(t, err)
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
expected := &v1.ExecutionPayloadDeneb{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
PrevRandao: prevRandao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: extraData,
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: blockHash,
Transactions: txList,
Withdrawals: []*v1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: address,
Amount: 1,
},
},
BlobGasUsed: 2,
ExcessBlobGas: 3,
}
require.DeepEqual(t, expected, p)
commitment, err := hexutil.Decode("0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
require.NoError(t, err)
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
require.NoError(t, err)
expectedBlobs := &v1.BlobsBundle{
KzgCommitments: [][]byte{
commitment,
},
Proofs: [][]byte{
proof,
},
Blobs: [][]byte{
make([]byte, fieldparams.BlobLength),
},
}
require.DeepEqual(t, blobsBundle, expectedBlobs)
}
func pbEth1Data() *eth.Eth1Data {
return &eth.Eth1Data{
DepositRoot: make([]byte, 32),
@@ -1293,30 +1026,6 @@ func pbExecutionPayloadHeaderCapella(t *testing.T) *v1.ExecutionPayloadHeaderCap
}
}
func pbExecutionPayloadHeaderDeneb(t *testing.T) *v1.ExecutionPayloadHeaderDeneb {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
return &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlobGasUsed: 1,
ExcessBlobGas: 2,
}
}
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeader{
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
@@ -1337,16 +1046,6 @@ func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
require.Equal(t, expected, string(b))
}
func TestExecutionPayloadHeaderDeneb_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeaderDeneb{
ExecutionPayloadHeaderDeneb: pbExecutionPayloadHeaderDeneb(t),
}
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","blob_gas_used":"1","excess_blob_gas":"2"}`
require.Equal(t, expected, string(b))
}
var testBuilderBid = `{
"version":"bellatrix",
"data":{
@@ -1457,14 +1156,6 @@ func TestUint256Unmarshal(t *testing.T) {
require.Equal(t, expected, string(m))
}
func TestUint256Unmarshal_BadData(t *testing.T) {
var bigNum Uint256
assert.ErrorContains(t, "provided Uint256 json string is too short", bigNum.UnmarshalJSON([]byte{'"'}))
assert.ErrorContains(t, "provided Uint256 json string is malformed", bigNum.UnmarshalJSON([]byte{'"', '1', '2'}))
}
func TestUint256UnmarshalNegative(t *testing.T) {
m := "-1"
var value Uint256

View File

@@ -1,97 +0,0 @@
package client
import (
"context"
"io"
"net"
"net/http"
"net/url"
"github.com/pkg/errors"
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
// Token returns the bearer token used for jwt authentication
func (c *Client) Token() string {
return c.token
}
// BaseURL returns the base url of the client
func (c *Client) BaseURL() *url.URL {
return c.baseURL
}
// Do executes the request against the HTTP client.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
return c.hc.Do(req)
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
return b, nil
}
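Editorial note, not part of the diff: a minimal sketch of how the Client removed above might be exercised, assuming the import path github.com/prysmaticlabs/prysm/v4/api/client; the host and request path are placeholders.
package main
import (
	"context"
	"fmt"
	"time"
	"github.com/prysmaticlabs/prysm/v4/api/client"
)
func main() {
	// A bare host:port is accepted; urlForHost falls back to an http scheme.
	c, err := client.NewClient("localhost:3500", client.WithTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	// Get resolves the path against the base URL and returns the raw body on a 200,
	// or an error wrapping ErrNotOK otherwise.
	body, err := c.Get(context.Background(), "/eth/v1/node/version")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}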

View File

@@ -1,48 +0,0 @@
package client
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
func TestWithAuthenticationToken(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
require.NoError(t, err)
require.Equal(t, cl.Token(), "my token")
}
func TestBaseURL(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500")
require.NoError(t, err)
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
require.Equal(t, "3500", cl.BaseURL().Port())
}

View File

@@ -1,40 +0,0 @@
package client
import (
"fmt"
"io"
"net/http"
"github.com/pkg/errors"
)
// ErrMalformedHostname is used to indicate if a host name's format is incorrect.
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// Non200Err parses an HTTP response that is not a 200 and returns a formatted error describing it.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
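Editorial sketch, not part of the diff: callers of the Get helper can branch on the sentinels above with the standard library's errors.Is, since Non200Err wraps ErrNotOK (and ErrNotFound for 404s); this assumes pkg/errors exposes Unwrap, which holds for v0.9+. The context, client value and path are placeholders; assumed imports are "context", "errors" and the api/client package.
func classify(ctx context.Context, c *client.Client) string {
	_, err := c.Get(ctx, "/eth/v1/node/version")
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, client.ErrNotFound):
		// A 404 wraps ErrNotFound, which itself wraps ErrNotOK.
		return "endpoint not exposed by this host"
	case errors.Is(err, client.ErrNotOK):
		// Any other non-2xx response; the wrapped message carries code, url and body.
		return "non-2xx response"
	default:
		return "transport error"
	}
}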

View File

@@ -1,48 +0,0 @@
package client
import (
"fmt"
"net/http"
"time"
)
// ReqOption is a request functional option.
type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds header for authorization token.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// WithRoundTripper replaces the underlying HTTP transport with a custom one.
func WithRoundTripper(t http.RoundTripper) ClientOpt {
return func(c *Client) {
c.hc.Transport = t
}
}
// WithAuthenticationToken sets an oauth token to be used.
func WithAuthenticationToken(token string) ClientOpt {
return func(c *Client) {
c.token = token
}
}
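Editorial sketch, not part of the diff: the two option families above compose independently. ClientOpt values configure the wrapped http.Client once at construction time, while ReqOption values decorate each outgoing request. The stub transport mirrors the roundtrip helper used by the builder client tests earlier in this diff; host, path and token are placeholders.
package main
import (
	"context"
	"io"
	"net/http"
	"strings"
	"time"
	"github.com/prysmaticlabs/prysm/v4/api/client"
)
// roundTripFn lets a plain function stand in as an http.RoundTripper.
type roundTripFn func(*http.Request) (*http.Response, error)
func (f roundTripFn) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }
func main() {
	stub := roundTripFn(func(r *http.Request) (*http.Response, error) {
		return &http.Response{
			StatusCode: http.StatusOK,
			Body:       io.NopCloser(strings.NewReader(`{}`)),
			Request:    r,
		}, nil
	})
	// Client-level options: a timeout plus a swapped-in transport for testing.
	c, err := client.NewClient("localhost:3500",
		client.WithTimeout(time.Second),
		client.WithRoundTripper(stub),
	)
	if err != nil {
		panic(err)
	}
	// Request-level options: SSZ accept header and a bearer token per call.
	if _, err = c.Get(context.Background(), "/eth/v1/builder/status",
		client.WithSSZEncoding(),
		client.WithAuthorizationToken(c.Token()),
	); err != nil {
		panic(err)
	}
}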

View File

@@ -1,13 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -1,121 +0,0 @@
package validator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)
const (
localKeysPath = "/eth/v1/keystores"
remoteKeysPath = "/eth/v1/remotekeys"
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
)
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetValidatorPubKeys gets the current list of web3signer or the local validator public keys in hex format.
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
if err != nil {
return nil, err
}
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
}
for index := range jsonremote.Keystores {
hexKeys[jsonremote.Keystores[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {
keys = append(keys, k)
}
return keys, nil
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}
return jsonlocal, nil
}
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")
}
}
return jsonremote, nil
}
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
feeRecipients := make([]string, len(validators))
for index, validator := range validators {
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
}
if feejson.Data == nil {
continue
}
feeRecipients[index] = feejson.Data.Ethaddress
}
return feeRecipients, nil
}
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
return feejson, nil
}
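Editorial sketch, not part of the diff: how the keymanager helper above could be driven end to end; the host and bearer token are placeholders, and the assumed imports are "context", "fmt", the api/client package and this api/client/validator package.
func listFeeRecipients(ctx context.Context) error {
	vc, err := validator.NewClient("localhost:7500", client.WithAuthenticationToken("api-token"))
	if err != nil {
		return err
	}
	// Merges local keystore keys and web3signer keys into one hex-encoded list.
	keys, err := vc.GetValidatorPubKeys(ctx)
	if err != nil {
		return err
	}
	// Returns one fee recipient per public key, in the same order.
	recipients, err := vc.GetFeeRecipientAddresses(ctx, keys)
	if err != nil {
		return err
	}
	for i, k := range keys {
		fmt.Printf("%s -> %s\n", k, recipients[i])
	}
	return nil
}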

View File

@@ -13,7 +13,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//encoding/bytesutil:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -33,7 +32,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
)
@@ -117,11 +116,7 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
// Something went wrong, but the request completed, meaning we can write headers and the error message.
for h, vs := range resp.Header {
for _, v := range vs {
if strings.HasSuffix(h, api.VersionHeader) {
w.Header().Set(api.VersionHeader, v)
} else {
w.Header().Set(h, v)
}
w.Header().Set(h, v)
}
}
// Handle gRPC timeout.
@@ -192,11 +187,9 @@ func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson
var statusCodeHeader string
for h, vs := range grpcResp.Header {
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
if strings.HasPrefix(h, grpc.MetadataPrefix) {
if h == grpc.WithPrefix(grpc.HttpCodeMetadataKey) {
if strings.HasPrefix(h, "Grpc-Metadata") {
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
statusCodeHeader = vs[0]
} else if strings.HasSuffix(h, api.VersionHeader) {
w.Header().Set(api.VersionHeader, vs[0])
}
} else {
for _, v := range vs {
@@ -230,7 +223,7 @@ func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.He
// Include custom error in the error JSON.
hasCustomError := false
if responseHeader != nil {
customError, ok := responseHeader[grpc.WithPrefix(grpc.CustomErrorMetadataKey)]
customError, ok := responseHeader["Grpc-Metadata-"+grpc.CustomErrorMetadataKey]
if ok {
hasCustomError = true
// Assume header has only one value and read the 0 index.

View File

@@ -8,7 +8,6 @@ import (
"strings"
"testing"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -281,8 +280,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
response := &http.Response{
Header: http.Header{
"Foo": []string{"foo"},
grpc.WithPrefix(grpc.HttpCodeMetadataKey): []string{"204"},
grpc.WithPrefix(api.VersionHeader): []string{"capella"},
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
},
}
container := defaultResponseContainer()
@@ -301,9 +299,6 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "224", v[0])
v, ok = writer.Header()["Eth-Consensus-Version"]
require.Equal(t, true, ok, "header not found")
assert.Equal(t, "capella", v[0])
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})
@@ -325,12 +320,11 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
t.Run("GET_invalid_status_code", func(t *testing.T) {
response := &http.Response{
Header: http.Header{"Grpc-Metadata-Eth-Consensus-Version": []string{"capella"}},
Header: http.Header{},
}
// Set invalid status code.
response.Header[grpc.WithPrefix(grpc.HttpCodeMetadataKey)] = []string{"invalid"}
response.Header[grpc.WithPrefix(api.VersionHeader)] = []string{"capella"}
response.Header["Grpc-Metadata-"+grpc.HttpCodeMetadataKey] = []string{"invalid"}
container := defaultResponseContainer()
responseJson, err := json.Marshal(container)
@@ -396,7 +390,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
func TestWriteError(t *testing.T) {
t.Run("ok", func(t *testing.T) {
responseHeader := http.Header{
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"{\"CustomField\":\"bar\"}"},
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"{\"CustomField\":\"bar\"}"},
}
errJson := &testErrorJson{
Message: "foo",
@@ -426,7 +420,7 @@ func TestWriteError(t *testing.T) {
logHook := test.NewGlobal()
responseHeader := http.Header{
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"invalid"},
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"invalid"},
}
WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)

View File

@@ -6,11 +6,3 @@ const CustomErrorMetadataKey = "Custom-Error"
// HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
const HttpCodeMetadataKey = "X-Http-Code"
// MetadataPrefix is the prefix for gRPC metadata headers
const MetadataPrefix = "Grpc-Metadata"
// WithPrefix creates a new string with grpc metadata prefix
func WithPrefix(value string) string {
return MetadataPrefix + "-" + value
}
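Editorial note, not part of the diff: WithPrefix only concatenates the prefix, which is why the apimiddleware hunks above can swap it for a literal "Grpc-Metadata-" string. A two-line sketch of the equivalence (assumes "fmt" and the api/grpc package are imported):
// Both expressions name the same gRPC metadata header.
fmt.Println(grpc.WithPrefix(grpc.HttpCodeMetadataKey))   // Grpc-Metadata-X-Http-Code
fmt.Println("Grpc-Metadata-" + grpc.HttpCodeMetadataKey) // Grpc-Metadata-X-Http-Code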

View File

@@ -1,7 +0,0 @@
package api
const (
VersionHeader = "Eth-Consensus-Version"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
)

View File

@@ -144,7 +144,6 @@ func (f *Feed) Send(value interface{}) (nsent int) {
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()

View File

@@ -32,8 +32,6 @@ func TestFeedPanics(t *testing.T) {
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed

View File

@@ -35,7 +35,6 @@ go_library(
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -89,7 +88,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -340,13 +340,7 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
}
s.headLock.RLock()
headRoot := s.head.root
headSlot := s.head.slot
headOptimistic := s.head.optimistic
s.headLock.RUnlock()
// we trust the head package for recent head slots, otherwise fallback to forkchoice
if headSlot+2 >= s.CurrentSlot() {
return headOptimistic, nil
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
@@ -387,7 +381,7 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
@@ -499,13 +493,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t

View File

@@ -422,12 +422,6 @@ func TestService_IsOptimistic(t *testing.T) {
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, primitives.Slot(0), c.CurrentSlot())
require.Equal(t, false, opt)
c.SetGenesisTime(time.Now().Add(-time.Second * time.Duration(4*params.BeaconConfig().SecondsPerSlot)))
opt, err = c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}

View File

@@ -17,6 +17,8 @@ var (
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
@@ -39,15 +41,13 @@ var (
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
lastValidHash [32]byte
root [32]byte
}
type invalidBlockError interface {
Error() string
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
LastValidHash() [32]byte
}
// BlockRoot returns the invalid block root.
@@ -55,11 +55,6 @@ func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}
// LastValidHash returns the last valid hash root.
func (e invalidBlock) LastValidHash() [32]byte {
return e.lastValidHash
}
// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block leading up to the last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
@@ -77,19 +72,6 @@ func IsInvalidBlock(e error) bool {
return true
}
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
// doesn't have a last valid hash, [32]byte{} is returned.
func InvalidBlockLVH(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.LastValidHash()
}
// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid block root, [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {

View File

@@ -2,7 +2,6 @@ package blockchain
import (
"context"
"crypto/sha256"
"fmt"
"github.com/pkg/errors"
@@ -27,8 +26,6 @@ import (
"go.opencensus.io/trace"
)
const blobCommitmentVersionKZG uint8 = 0x01
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
@@ -157,7 +154,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
} else if hasAttr && payloadID == nil {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
@@ -185,24 +182,21 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
if blocks.IsPreBellatrixVersion(postStateVersion) {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
@@ -213,18 +207,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
var lastValidHash []byte
if blk.Version() >= version.Deneb {
var versionedHashes [][32]byte
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
if err != nil {
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes)
} else {
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{} /*empty version hashes before Deneb*/)
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
switch err {
case nil:
newPayloadValidNodeCount.Inc()
@@ -237,37 +220,35 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
}).Info("Called new payload with optimistic block")
return false, nil
case execution.ErrInvalidPayloadStatus:
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
}
case execution.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return err
}
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
lastValidHash: lvh,
}
}
// getPayloadAttribute returns the payload attribute for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
@@ -324,7 +305,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella, version.Deneb:
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
@@ -374,17 +355,3 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
}
return nil
}
func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([][32]byte, error) {
commitments, err := body.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
}
versionedHashes := make([][32]byte, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = sha256.Sum256(commitment)
versionedHashes[i][0] = blobCommitmentVersionKZG
}
return versionedHashes, nil
}
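Editorial note, not part of the diff: the removed helper above follows the EIP-4844 versioned-hash scheme — take the SHA-256 of the 48-byte KZG commitment and overwrite the first byte with the 0x01 version tag. A standalone sketch (assumes "crypto/sha256" is imported; the commitment bytes are placeholders):
commitment := make([]byte, 48) // placeholder KZG commitment
vh := sha256.Sum256(commitment)
vh[0] = blobCommitmentVersionKZG // 0x01, declared near the top of this file
// vh is the versioned hash handed to the engine API alongside the payload.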

View File

@@ -525,13 +525,11 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "phase 0 post state",
postState: phase0State,
blk: altairBlk, // same as phase 0 for this test
isValidPayload: true,
},
{
name: "altair post state",
postState: altairState,
blk: altairBlk,
isValidPayload: true,
},
{
@@ -745,37 +743,6 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.Equal(t, true, validated)
}
func Test_reportInvalidBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, _, fcs := tr.ctx, tr.db, tr.fcs
jcp := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
@@ -860,42 +827,6 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
require.Equal(t, 0, len(a))
}
func Test_GetPayloadAttributeDeneb(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := primitives.ValidatorIndex(1)
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -1081,28 +1012,3 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'a'}, h)
}
func TestKZGCommitmentToVersionedHashes(t *testing.T) {
kzg1 := make([]byte, 96)
kzg1[10] = 'a'
kzg2 := make([]byte, 96)
kzg2[1] = 'b'
commitments := [][]byte{kzg1, kzg2}
blk := &ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
Body: &ethpb.BeaconBlockBodyDeneb{
BlobKzgCommitments: commitments,
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
vhs, err := kzgCommitmentsToVersionedHashes(b.Block().Body())
require.NoError(t, err)
vh0 := [32]byte{1, 207, 35, 21, 201, 118, 88, 167, 237, 84, 173, 161, 129, 118, 94, 35, 179, 250, 219, 81, 80, 250, 179, 149, 9, 246, 49, 192, 185, 175, 69, 102}
vh1 := [32]byte{1, 226, 124, 226, 142, 82, 126, 176, 113, 150, 179, 26, 240, 245, 250, 24, 130, 172, 231, 1, 166, 130, 2, 42, 183, 121, 248, 22, 172, 57, 212, 126}
require.Equal(t, 2, len(vhs))
require.Equal(t, vhs[0], vh0)
require.Equal(t, vhs[1], vh1)
}

View File

@@ -47,11 +47,9 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot primitives.Slot // the head block slot number
optimistic bool // optimistic status when saved head
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// This saves head info to the local service cache; it also saves the
@@ -96,10 +94,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
log.WithError(err).Error("could not check if node is optimistically synced")
}
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
@@ -131,6 +125,10 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
@@ -152,14 +150,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
}
// Cache the new head info.
newHead := &head{
root: newHeadRoot,
block: headBlock,
state: headState,
optimistic: isOptimistic,
slot: headBlock.Block().Slot(),
}
if err := s.setHead(newHead); err != nil {
if err := s.setHead(newHeadRoot, headBlock, headState); err != nil {
return errors.Wrap(err, "could not set head")
}
@@ -204,22 +195,20 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
return nil
}
// This sets the head view object, which is used to track the head slot, root, block, state and optimistic status
func (s *Service) setHead(newHead *head) error {
// This sets the head view object, which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block and state.
bCp, err := newHead.block.Copy()
bCp, err := block.Copy()
if err != nil {
return err
}
s.head = &head{
root: newHead.root,
block: bCp,
state: newHead.state.Copy(),
optimistic: newHead.optimistic,
slot: newHead.slot,
root: root,
block: bCp,
state: state.Copy(),
}
return nil
}
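A hypothetical read-side accessor (not in this diff) to show how the new optimistic field on the cached head is meant to be consumed, guarded by the same headLock that setHead holds for writes; the method name is an assumption:

// Sketch only: read the cached optimistic status under the head lock.
func (s *Service) headIsOptimistic() bool {
    s.headLock.RLock()
    defer s.headLock.RUnlock()
    return s.head.optimistic
}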

View File

@@ -157,11 +157,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, err
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, slot)
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
if err != nil {
return nil, err
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -16,7 +15,7 @@ import (
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
// Current period
@@ -39,7 +38,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -67,7 +66,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
@@ -82,7 +81,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())

View File

@@ -1,31 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"trusted_setup.go",
"validation.go",
],
embedsrcs = ["trusted_setup.json"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg",
visibility = ["//visibility:public"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"trusted_setup_test.go",
"validation_test.go",
],
embed = [":go_default_library"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
],
)

View File

@@ -1,28 +0,0 @@
package kzg
import (
_ "embed"
"encoding/json"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/pkg/errors"
)
var (
//go:embed trusted_setup.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context
)
func Start() error {
parsedSetup := GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
if err != nil {
return errors.Wrap(err, "could not parse trusted setup JSON")
}
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
if err != nil {
return errors.Wrap(err, "could not initialize go-kzg context")
}
return nil
}
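Usage sketch (an assumption, not shown in this diff): Start is intended to be called once during node startup, before any blob proof verification; the error handling style here is only illustrative.

if err := kzg.Start(); err != nil {
    log.WithError(err).Fatal("could not load the KZG trusted setup")
}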

File diff suppressed because one or more lines are too long

View File

@@ -1,12 +0,0 @@
package kzg
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestStart(t *testing.T) {
require.NoError(t, Start())
require.NotNil(t, kzgContext)
}

View File

@@ -1,47 +0,0 @@
package kzg
import (
"fmt"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// IsDataAvailable checks that:
// - all blobs in the block are available
// - the number of expected KZG commitments matches the number of blobs in the block
// - the number of proofs matches the number of blobs
// - the proofs verify against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
if len(commitments) != len(sidecars) {
return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
len(commitments), len(sidecars))
}
if len(commitments) == 0 {
return nil
}
blobs := make([]GoKZG.Blob, len(commitments))
proofs := make([]GoKZG.KZGProof, len(commitments))
cmts := make([]GoKZG.KZGCommitment, len(commitments))
for i, sidecar := range sidecars {
blobs[i] = bytesToBlob(sidecar.Blob)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
cmts[i] = bytesToCommitment(commitments[i])
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
copy(ret[:], blob)
return
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
copy(ret[:], commitment)
return
}
func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
copy(ret[:], proof)
return
}
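The bytesTo* helpers above copy into fixed-size arrays, so short inputs are right-padded with zeros, which is what TestBytesToAny below relies on. A stdlib-only sketch of the same behavior (the 48-byte length is illustrative, not a claim about GoKZG's types):

package main

import "fmt"

func main() {
    var dst [48]byte // fixed-size destination, like a commitment or proof array
    copy(dst[:], []byte{0x01, 0x02})
    fmt.Println(dst[0], dst[1], dst[2]) // 1 2 0: the tail stays zeroed
}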

View File

@@ -1,25 +0,0 @@
package kzg
import (
"testing"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestIsDataAvailable(t *testing.T) {
sidecars := make([]*ethpb.BlobSidecar, 0)
commitments := make([][]byte, 0)
require.NoError(t, IsDataAvailable(commitments, sidecars))
}
func TestBytesToAny(t *testing.T) {
bytes := []byte{0x01, 0x02}
blob := GoKZG.Blob{0x01, 0x02}
commitment := GoKZG.KZGCommitment{0x01, 0x02}
proof := GoKZG.KZGProof{0x01, 0x02}
require.DeepEqual(t, blob, bytesToBlob(bytes))
require.DeepEqual(t, commitment, bytesToCommitment(bytes))
require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}

View File

@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case err != nil:
return err
default:
@@ -73,7 +73,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
@@ -87,12 +87,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
"deposits": len(block.Body().Deposits()),
}
commits, err := block.Body().BlobKzgCommitments()
if err == nil {
lf["commitments"] = len(commits)
}
log.WithFields(lf).Debug("Synced new block")
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
@@ -125,7 +120,7 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
fields := logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber(),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {

View File

@@ -172,15 +172,11 @@ var (
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to postBlockProcess()",
Help: "Total time in milliseconds to complete a call to onBlock()",
})
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "state_transition_processing_milliseconds",
Help: "Total time to call a state transition in validateStateTransition()",
})
chainServiceProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
Help: "Total time to call a state transition in onBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
@@ -250,45 +246,40 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
slashingBalance := uint64(0)
slashingEffectiveBalance := uint64(0)
for i := 0; i < postState.NumValidators(); i++ {
validator, err := postState.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(i))
if err != nil {
log.WithError(err).Error("Could not load validator")
continue
}
for i, validator := range postState.Validators() {
bal, err := postState.BalanceAtIndex(primitives.ValidatorIndex(i))
if err != nil {
log.WithError(err).Error("Could not load validator balance")
continue
}
if validator.Slashed() {
if currentEpoch < validator.ExitEpoch() {
if validator.Slashed {
if currentEpoch < validator.ExitEpoch {
slashingInstances++
slashingBalance += bal
slashingEffectiveBalance += validator.EffectiveBalance()
slashingEffectiveBalance += validator.EffectiveBalance
} else {
slashedInstances++
}
continue
}
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
if currentEpoch < validator.ExitEpoch() {
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
if currentEpoch < validator.ExitEpoch {
exitingInstances++
exitingBalance += bal
exitingEffectiveBalance += validator.EffectiveBalance()
exitingEffectiveBalance += validator.EffectiveBalance
} else {
exitedInstances++
}
continue
}
if currentEpoch < validator.ActivationEpoch() {
if currentEpoch < validator.ActivationEpoch {
pendingInstances++
pendingBalance += bal
continue
}
activeInstances++
activeBalance += bal
activeEffectiveBalance += validator.EffectiveBalance()
activeEffectiveBalance += validator.EffectiveBalance
}
activeInstances += exitingInstances + slashingInstances
activeBalance += exitingBalance + slashingBalance

View File

@@ -172,10 +172,3 @@ func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return nil
}
}
func WithSyncComplete(c chan struct{}) Option {
return func(s *Service) error {
s.syncComplete = c
return nil
}
}

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -23,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -40,11 +40,59 @@ const depositDeadline = 20 * time.Second
// This defines the size of the upper bound for the initial sync block cache.
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// postBlockProcess is called when a gossip block is received. This function performs
// several duties, most importantly informing the engine if the head was updated,
// saving the new head information to the blockchain package, and
// handling attestations, slashings and similar data included in the block.
func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
// The block's signing root should be computed before calling this method to avoid redundant
// computation in this method and methods it calls into.
//
// Spec pseudocode definition:
//
// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
//
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
//
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
//
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
@@ -53,7 +101,52 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
startTime := time.Now()
b := signed.Block()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
preState, err := s.getBlockPreState(ctx, b)
if err != nil {
return err
}
// Verify that the parent block is in forkchoice
if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not validate new payload")
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
@@ -67,6 +160,34 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
}
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
// Using a different context to prevent timeouts as this operation can be expensive
// and we want to avoid affecting the critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
tracing.AnnotateError(span, err)
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
tracing.AnnotateError(span, err)
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}()
}
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
@@ -87,6 +208,18 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating the next slot state cache can happen in the background. It shouldn't block the rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -96,12 +229,6 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
return err
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
if err != nil {
log.WithError(err).Error("Could not check if block is optimistic")
optimistic = true
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
@@ -110,35 +237,57 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
BlockRoot: blockRoot,
SignedBlock: signed,
Verified: true,
Optimistic: optimistic,
},
})
defer reportAttestationInclusion(b)
if headRoot == blockRoot {
// Updating the next slot state cache can happen in the background,
// except at the epoch boundary, in which case we lock to handle
// the shuffling and proposer cache updates.
// We handle these caches only on canonical
// blocks; otherwise this will be handled by lateBlockTasks
slot := postState.Slot()
if slots.IsEpochEnd(slot) {
if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
return errors.Wrap(err, "could not handle epoch boundary")
}
} else {
go func() {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Error("could not update next slot state cache")
}
}()
// Save justified checkpoint to db.
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
// Save finalized checkpoint to db and more.
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isOptimistic,
},
})
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
defer cancel()
if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
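A worked example (illustrative values, stdlib only) of the justified-checkpoint persistence condition used above: even when the fork choice store's justified epoch does not advance, the checkpoint is still written to the DB if this block's post-state is the one that justified that epoch.

package main

import "fmt"

func main() {
    // The store was already justified at epoch 4, but this block moved the
    // state's justified epoch from 3 to 4.
    currStoreJustified := uint64(4)
    preStateJustified := uint64(3)
    postStateJustified := uint64(4)
    justified := uint64(4)
    save := justified > currStoreJustified ||
        (justified == postStateJustified && justified > preStateJustified)
    fmt.Println(save) // true: SaveJustifiedCheckpoint would be called
}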
@@ -161,14 +310,19 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock,
blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
if len(blks) == 0 {
if len(blks) == 0 || len(blockRoots) == 0 {
return errors.New("no blocks provided")
}
if len(blks) != len(blockRoots) {
return errWrongBlockCount
}
if err := consensusblocks.BeaconBlockIsNil(blks[0]); err != nil {
return invalidBlock{error: err}
}
@@ -218,7 +372,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[b.Root()] = preState.Copy()
boundaries[blockRoots[i]] = preState.Copy()
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
@@ -251,12 +405,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
var isValidPayload bool
for i, b := range blks {
root := b.Root()
isValidPayload, err = s.notifyNewPayload(ctx,
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
return err
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -268,13 +421,13 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
if err := s.saveInitSyncBlock(ctx, blockRoots[i], b); err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: root[:],
Root: blockRoots[i][:],
}); err != nil {
tracing.AnnotateError(span, err)
return err
@@ -298,9 +451,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return err
}
}
lastB := blks[len(blks)-1]
lastBR := lastB.Root()
// Also saves the last post state which to be used as pre state for the next batch.
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
@@ -318,6 +470,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
lastB := blks[len(blks)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
@@ -329,48 +482,67 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update committee cache")
}
if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update proposer index cache")
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Could not update committee cache")
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
}()
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
return err
}
// Update caches at epoch boundary slot.
// The following updates have a shortcut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}
headSt, err := s.HeadState(ctx)
if err != nil {
return err
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
return err
}
}
return nil
}
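A quick worked example (mainnet SlotsPerEpoch = 32, illustrative) tying this to the test further down: a post-state at slot 64 is at or past the stored boundary, so the ">= nextEpochBoundarySlot" branch runs, the current-epoch caches are refreshed, and nextEpochBoundarySlot advances to the start of the next epoch.

package main

import "fmt"

func main() {
    slotsPerEpoch := uint64(32)              // mainnet value, for illustration
    postSlot := 2 * slotsPerEpoch            // 64, as in TestHandleEpochBoundary_UpdateFirstSlot below
    nextEpoch := postSlot/slotsPerEpoch + 1  // 3
    newBoundary := nextEpoch * slotsPerEpoch // 96 == 3 * SlotsPerEpoch, the asserted nextEpochBoundarySlot
    fmt.Println(postSlot, newBoundary)
}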
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
// This feeds the block into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
// return early if we are advancing to a past epoch
if slot < headState.Slot() {
return nil
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
}
if !slots.IsEpochEnd(slot) {
return nil
}
copied := headState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, slot+1)
if err != nil {
return err
}
return s.updateEpochBoundaryCaches(ctx, copied)
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// This feeds the attestations included in the block into the fork choice store. It allows the fork choice store
@@ -479,58 +651,33 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) runLateBlockTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("failed to wait for initial sync")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
func (s *Service) spawnLateBlockTasksLoop() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
return
}
}
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if slots.ToEpoch(block.Slot())+params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest > primitives.Epoch(s.CurrentSlot()) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
if err != nil {
return errors.Wrap(err, "could not get blob sidecars")
}
return kzg.IsDataAvailable(kzgCommitments, sidecars)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing the next slot.
// It also updates the next slot cache and the proposer index cache to deal with skipped slots.
// It also updates the next slot cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if s.CurrentSlot() == s.HeadSlot() {
return
}
@@ -538,30 +685,12 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists a proposer for the next slot, but we haven't called fcu w/ payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
@@ -569,6 +698,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
@@ -578,21 +709,11 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}
// waitForSync blocks until the node is synced to the head.
func (s *Service) waitForSync() error {
select {
case <-s.syncComplete:
return nil
case <-s.ctx.Done():
return errors.New("context closed, exiting goroutine")
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}
return err
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
@@ -210,44 +209,35 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}
// inserts finalized deposits into our finalized deposit trie; it needs to be
// called in the background
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
// inserts finalized deposits into our finalized deposit trie.
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()
startTime := time.Now()
// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
log.WithError(err).Error("could not fetch finalized state")
return
return errors.Wrap(err, "could not fetch finalized state")
}
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
log.WithError(err).Error("could not cast eth1 deposit index")
return
return errors.Wrap(err, "could not cast eth1 deposit index")
}
// The deposit index in the state is always the index of the next deposit
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
finalizedEth1DepIdx := eth1DepositIndex - 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not insert finalized deposits")
return
eth1DepositIndex -= 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
return err
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not prune deposit proofs")
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
// Prune deposits which have already been finalized; the method below prunes all pending deposits (non-inclusive) up
// to the provided eth1 deposit index.
s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(eth1DepositIndex)) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
return nil
}
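A worked example (illustrative, stdlib only) of the off-by-one handled above: the state's Eth1DepositIndex points at the next deposit to be included, so a finalized state reporting 8 means deposits 0 through 7 are in, and the trie is finalized through index 7, which the tests further down assert as MerkleTrieIndex == 7.

package main

import "fmt"

func main() {
    eth1DepositIndex := uint64(8)                // next deposit to be included, per the finalized state
    finalizedEth1DepIdx := eth1DepositIndex - 1  // last deposit actually processed
    fmt.Println(finalizedEth1DepIdx)             // 7
}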
// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling

View File

@@ -41,6 +41,103 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
ojc := &ethpb.Checkpoint{}
stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
util.SaveBlock(t, ctx, beaconDB, random)
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
validGenesisRoot, [32]byte{'r'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
s state.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: util.NewBeaconBlock(),
s: st.Copy(),
wantErrString: "could not reconstruct parent state",
},
{
name: "block is from the future",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot2
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
}(),
s: st.Copy(),
wantErrString: "is in the far distant future",
},
{
name: "could not get finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
b.Block.Slot = 2
return b
}(),
s: st.Copy(),
wantErrString: "not descendant of finalized checkpoint",
},
{
name: "same slot as finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.Slot = 0
b.Block.ParentRoot = randomParentRoot2
return b
}(),
s: st.Copy(),
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fRoot := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
}
func TestStore_OnBlockBatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
@@ -49,7 +146,8 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []consensusblocks.ROBlock
var blks []interfaces.ReadOnlySignedBeaconBlock
var blkRoots [][32]byte
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
@@ -62,15 +160,16 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
blks = append(blks, rwsb)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err := service.onBlockBatch(ctx, blks)
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blks[63].Root(), jroot)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, primitives.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
}
@@ -82,7 +181,8 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []consensusblocks.ROBlock
var blks []interfaces.ReadOnlySignedBeaconBlock
var blkRoots [][32]byte
blkCount := 4
for i := 0; i <= blkCount; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -91,12 +191,13 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
rwsb, err := consensusblocks.NewROBlock(wsb)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
blks = append(blks, rwsb)
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
require.NoError(t, service.onBlockBatch(ctx, blks))
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
@@ -535,7 +636,8 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s.Slot(), s, []byte{}))
require.NoError(t, service.handleEpochBoundary(ctx, s))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
}
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
@@ -555,20 +657,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, fcs.NewSlot(ctx, i))
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -603,20 +692,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -638,7 +714,8 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true)
err := service.onBlock(tr.ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -652,11 +729,11 @@ func TestOnBlock_InvalidSignature(t *testing.T) {
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
err = service.onBlock(ctx, wsb, r)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -680,13 +757,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -712,7 +783,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
@@ -721,45 +792,6 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
}
func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
var zeroSig [96]byte
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
depositCache.InsertPendingDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
pendingDeps := depositCache.PendingContainers(ctx, nil)
for _, d := range pendingDeps {
assert.DeepEqual(t, true, d.Index >= 8, "Pending deposits were not pruned")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc
@@ -787,7 +819,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
// Insert 3 deposits before hand.
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
@@ -797,7 +829,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}
// Insert New Finalized State with higher deposit count.
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
@@ -1099,35 +1131,19 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
var wg sync.WaitGroup
wg.Add(4)
go func() {
preState, err := service.getBlockPreState(ctx, wsb1.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb2.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb3.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb4.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
@@ -1195,13 +1211,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1214,12 +1224,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1233,12 +1238,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1255,12 +1255,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1283,12 +1278,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
@@ -1311,13 +1301,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1374,12 +1358,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1392,12 +1371,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1411,13 +1385,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1434,12 +1402,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.onBlock(ctx, wsb, firstInvalidRoot)
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1462,12 +1425,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
@@ -1490,12 +1448,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1553,13 +1506,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1572,13 +1519,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1592,12 +1533,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.onBlock(ctx, wsb, lastValidRoot)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1619,12 +1555,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
invalidRoots[i-13], err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false)
err = service.onBlock(ctx, wsb, invalidRoots[i-13])
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1645,12 +1576,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1684,12 +1610,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true))
require.NoError(t, service.onBlock(ctx, wsb, root))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1707,12 +1628,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1732,13 +1648,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1789,12 +1699,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
}
for i := 6; i < 12; i++ {
@@ -1807,12 +1712,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
}
@@ -1826,12 +1726,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.onBlock(ctx, wsb, lastValidRoot)
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1852,18 +1747,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1882,11 +1766,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that the headroot/state are not in DB and restart the node
@@ -1928,10 +1808,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}))
require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
@@ -1970,12 +1848,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, root))
st, err = service.HeadState(ctx)
require.NoError(t, err)

View File

@@ -86,30 +86,23 @@ func (s *Service) spawnProcessAttestationsRoutine() {
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
case <-pat.C():
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
case <-st.C():
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, s.CurrentSlot())
}
}
}()
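A minimal standalone sketch of the two-tick-per-slot pattern used in the routine above, assuming 3-second slots and a 1-second pre-boundary offset for brevity (this is not Prysm's slots package):

```go
package main

import (
	"fmt"
	"time"
)

// slotTicker emits two ticks per slot: one at the slot start and one a fixed
// interval before the next slot boundary (the "late block" check point).
func slotTicker(genesis time.Time, slotDur, preBoundary time.Duration, out chan<- string) {
	for slot := 0; ; slot++ {
		start := genesis.Add(time.Duration(slot) * slotDur)
		time.Sleep(time.Until(start)) // start-of-slot tick
		out <- fmt.Sprintf("slot %d: start-of-slot tick", slot)
		time.Sleep(time.Until(start.Add(slotDur - preBoundary))) // pre-boundary tick
		out <- fmt.Sprintf("slot %d: pre-boundary tick", slot)
	}
}

func main() {
	out := make(chan string)
	go slotTicker(time.Now(), 3*time.Second, 1*time.Second, out)
	for i := 0; i < 4; i++ {
		fmt.Println(<-out)
	}
}
```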

View File

@@ -128,13 +128,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -184,13 +178,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -7,24 +7,15 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
@@ -33,9 +24,8 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -57,85 +47,14 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
currentEpoch := coreTime.CurrentEpoch(preState)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
}
if coreTime.CurrentEpoch(postState) > currentEpoch {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
}
// Handle post block operations such as pruning exits and bls messages if incoming block is the head
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
@@ -167,15 +86,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
log.WithError(err).Error("Unable to log state transition data")
}
chainServiceProcessingTime.Observe(float64(time.Since(receivedTime).Milliseconds()))
return nil
}
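The ReceiveBlock body removed above validated the consensus state transition and the execution payload concurrently. A minimal sketch of that errgroup pattern, with assumed stand-in validators in place of the real service methods:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// validateConsensus and validateExecution are assumed stand-ins for the consensus
// state transition and the execution engine's newPayload check respectively.
func validateConsensus(ctx context.Context) error         { return nil }
func validateExecution(ctx context.Context) (bool, error) { return true, nil }

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	var validPayload bool
	eg.Go(func() error { return validateConsensus(ctx) })
	eg.Go(func() error {
		var err error
		validPayload, err = validateExecution(ctx)
		return err
	})
	// Wait fails fast if either branch errors; reads after Wait are safe.
	if err := eg.Wait(); err != nil {
		fmt.Println("block rejected:", err)
		return
	}
	fmt.Println("block accepted, execution payload valid:", validPayload)
}
```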
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
@@ -183,31 +100,25 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks); err != nil {
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err
}
for _, b := range blocks {
for i, b := range blocks {
blockCopy, err := b.Copy()
if err != nil {
return err
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(b.Root())
if err != nil {
log.WithError(err).Error("Could not check if block is optimistic")
optimistic = true
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block().Slot(),
BlockRoot: b.Root(),
BlockRoot: blkRoots[i],
SignedBlock: blockCopy,
Verified: true,
Optimistic: optimistic,
},
})
@@ -239,11 +150,6 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
return s.hasBlockInInitSyncOrDB(ctx, root)
}
// RecentBlockSlot returns block slot from fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
return s.cfg.ForkChoiceStore.Slot(root)
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.cfg.ForkChoiceStore.Lock()
@@ -320,109 +226,3 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
// This performs the state transition function and returns the post-state or an
// error if the block fails consensus rule verification
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
b := signed.Block()
// Verify that the parent block is in forkchoice
parentRoot := b.ParentRoot()
if !s.InForkchoice(parentRoot) {
return nil, ErrNotDescendantOfFinalized
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
return postState, nil
}
// updateJustificationOnBlock updates the justified checkpoint on DB if the
// incoming block has updated it on forkchoice.
func (s *Service) updateJustificationOnBlock(ctx context.Context, preState, postState state.BeaconState, preJustifiedEpoch primitives.Epoch) error {
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > preJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
return nil
}
// updateFinalizationOnBlock performs some duties when the incoming block
// changes the finalized checkpoint. It returns true when this has happened.
func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postState state.BeaconState, preFinalizedEpoch primitives.Epoch) (bool, error) {
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > preFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return true, err
}
return true, nil
}
return false, nil
}
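A standalone sketch (made-up epoch values) of the guard shared by updateJustificationOnBlock and updateFinalizationOnBlock above: persist the checkpoint only when forkchoice moved it past what the store held before the block, or when it matches the post-state and advanced relative to the pre-state.

```go
package main

import "fmt"

// shouldPersist mirrors the condition used in updateJustificationOnBlock and
// updateFinalizationOnBlock above.
func shouldPersist(fcEpoch, storePreEpoch, preStateEpoch, postStateEpoch uint64) bool {
	return fcEpoch > storePreEpoch || (fcEpoch == postStateEpoch && fcEpoch > preStateEpoch)
}

func main() {
	fmt.Println(shouldPersist(5, 4, 4, 5)) // true: forkchoice moved ahead of the stored epoch
	fmt.Println(shouldPersist(4, 4, 4, 4)) // false: nothing changed, skip the DB write
}
```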
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called on the background
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
isValidPayload = s.head.optimistic
}
s.headLock.RUnlock()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isValidPayload,
},
})
}
// sendBlockAttestationsToSlasher sends the incoming block's attestation to the slasher
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}
// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
return isValidPayload, err
}
}
return isValidPayload, nil
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -227,11 +228,13 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
err := s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb})
blks := []interfaces.ReadOnlySignedBeaconBlock{wsb}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {

View File

@@ -12,13 +12,11 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
@@ -36,6 +34,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
@@ -46,21 +45,21 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
}
// config options for the service.
@@ -92,10 +91,6 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
err := kzg.Start()
if err != nil {
return nil, errors.Wrap(err, "could not initialize go-kzg context")
}
ctx, cancel := context.WithCancel(ctx)
srv := &Service{
ctx: ctx,
@@ -113,6 +108,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
if srv.clockSetter == nil {
return nil, ErrMissingClockSetter
}
var err error
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
@@ -134,7 +130,7 @@ func (s *Service) Start() {
}
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
s.spawnLateBlockTasksLoop()
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -311,13 +307,7 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
if err := s.setHead(&head{
finalizedRoot,
finalizedBlock,
finalizedState,
finalizedBlock.Block().Slot(),
false,
}); err != nil {
if err := s.setHead(finalizedRoot, finalizedBlock, finalizedState); err != nil {
return errors.Wrap(err, "could not set head")
}
@@ -411,7 +401,7 @@ func (s *Service) initializeBeaconChain(
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState, coreTime.CurrentEpoch(genesisState)); err != nil {
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}
@@ -449,13 +439,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
if err := s.setHead(&head{
genesisBlkRoot,
genesisBlk,
genesisState,
genesisBlk.Block().Slot(),
false,
}); err != nil {
if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil {
log.WithError(err).Fatal("Could not set head")
}
return nil

View File

@@ -377,7 +377,9 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -451,7 +453,9 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -52,11 +52,6 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
return nil
}
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

View File

@@ -23,7 +23,6 @@ go_library(
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -22,7 +22,6 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -70,7 +69,6 @@ type ChainService struct {
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -205,7 +203,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock, _ [][32]byte) error {
if s.State == nil {
return ErrNilState
}
@@ -391,11 +389,6 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
return s.InitSyncBlockRoots[rt]
}
// RecentBlockSlot mocks the same method in the chain service.
func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
return s.BlockSlot, nil
}
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
@@ -540,7 +533,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
return [32]byte{}
}
// SetForkChoiceGenesisTime mocks the same method in the chain service
// SetForkchoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
if s.ForkChoiceStore != nil {
s.ForkChoiceStore.SetGenesisTime(timestamp)

View File

@@ -15,12 +15,10 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -2,7 +2,6 @@ package builder
import (
"context"
"fmt"
"reflect"
"time"
@@ -11,12 +10,10 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -27,7 +24,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -90,7 +87,7 @@ func (s *Service) Stop() error {
}
// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
@@ -98,13 +95,10 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return nil, nil, ErrNoBuilder
}
if uint64(len(blobs)) > fieldparams.MaxBlobsPerBlock {
return nil, nil, fmt.Errorf("blob count %d beyond max limit of %d", len(blobs), fieldparams.MaxBlobsPerBlock)
return nil, ErrNoBuilder
}
return s.c.SubmitBlindedBlock(ctx, b, blobs)
return s.c.SubmitBlindedBlock(ctx, b)
}
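A minimal sketch of the blob-count guard removed from SubmitBlindedBlock above, with an assumed limit of 6 standing in for fieldparams.MaxBlobsPerBlock:

```go
package main

import "fmt"

const maxBlobsPerBlock = 6 // assumed limit; the real value comes from fieldparams.MaxBlobsPerBlock

// checkBlobCount rejects a submission whose blob sidecar count exceeds the
// per-block limit before it is forwarded to the relay client.
func checkBlobCount(blobCount int) error {
	if blobCount > maxBlobsPerBlock {
		return fmt.Errorf("blob count %d beyond max limit of %d", blobCount, maxBlobsPerBlock)
	}
	return nil
}

func main() {
	fmt.Println(checkBlobCount(3)) // <nil>
	fmt.Println(checkBlobCount(9)) // blob count 9 beyond max limit of 6
}
```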
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.

View File

@@ -3,7 +3,6 @@ package builder
import (
"context"
"testing"
"time"
buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
@@ -39,21 +38,6 @@ func Test_RegisterValidator(t *testing.T) {
assert.Equal(t, true, builder.RegisteredVals[pubkey])
}
func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
require.NoError(t, err)
pubkey := bytesutil.ToBytes48([]byte("pubkey"))
var feeRecipient [20]byte
reg := &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], Timestamp: uint64(time.Now().UTC().Unix()), FeeRecipient: feeRecipient[:]}
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: reg}}))
registration, err := s.registrationCache.RegistrationByIndex(0)
require.NoError(t, err)
require.DeepEqual(t, reg, registration)
}
func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
require.NoError(t, err)
@@ -62,7 +46,7 @@ func Test_BuilderMethodsWithouClient(t *testing.T) {
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
_, _, err = s.SubmitBlindedBlock(context.Background(), nil, nil)
_, err = s.SubmitBlindedBlock(context.Background(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
err = s.RegisterValidator(context.Background(), nil)

View File

@@ -16,7 +16,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -27,12 +26,9 @@ type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
@@ -45,36 +41,23 @@ func (s *MockBuilderService) Configured() bool {
}
// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
switch b.Version() {
case version.Bellatrix:
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if s.Payload != nil {
w, err := blocks.WrappedExecutionPayload(s.Payload)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap payload")
return nil, errors.Wrap(err, "could not wrap payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
default:
return nil, nil, errors.New("unknown block version for mocking")
return w, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, s.ErrSubmitBlindedBlock
}
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}

View File

@@ -34,7 +34,6 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
@@ -88,6 +87,7 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -4,41 +4,35 @@ import (
"bytes"
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
const keyLength = 40
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the concatenation of the slot and the block root.
// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
slotToProposerAndPayloadIDs map[[40]byte][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
slotToProposerAndPayloadIDs: make(map[[40]byte][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
slot primitives.Slot,
r [fieldparams.RootLength]byte,
) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r [32]byte) (primitives.ValidatorIndex, [8]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
if !ok {
return 0, [pIdLength]byte{}, false
return 0, [8]byte{}, false
}
vId := ids[:vIdLength]
@@ -49,13 +43,8 @@ func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
slot primitives.Slot,
vId primitives.ValidatorIndex,
pId [pIdLength]byte,
r [fieldparams.RootLength]byte,
) {
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot, vId primitives.ValidatorIndex, pId [8]byte, r [32]byte) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
@@ -74,7 +63,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
}
}
// PrunePayloadIDs removes the payload ID entries older than input slot.
// PrunePayloadIDs removes the payload id entries that are older than the input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
f.Lock()
defer f.Unlock()
@@ -87,8 +76,8 @@ func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
}
}
func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
var k [keyLength]byte
func idKey(slot primitives.Slot, r [32]byte) [40]byte {
var k [40]byte
copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
return k
}
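The idKey hunk above composes the cache key as 8 bytes of big-endian slot followed by the 32-byte head root, and the stored value packs an 8-byte validator index next to an 8-byte payload ID. A self-contained sketch of that packing, using only the standard library (the map and helper names here are illustrative, not Prysm's API):

package main

import (
	"encoding/binary"
	"fmt"
)

// idKey mirrors the layout in the hunk: 8 bytes of big-endian slot + 32-byte head root.
func idKey(slot uint64, root [32]byte) [40]byte {
	var k [40]byte
	binary.BigEndian.PutUint64(k[:8], slot)
	copy(k[8:], root[:])
	return k
}

// packIDs stores an 8-byte validator index followed by an 8-byte payload ID.
func packIDs(validatorIndex uint64, payloadID [8]byte) [16]byte {
	var v [16]byte
	binary.BigEndian.PutUint64(v[:8], validatorIndex)
	copy(v[8:], payloadID[:])
	return v
}

func main() {
	cache := map[[40]byte][16]byte{}
	root := [32]byte{0xaa}
	cache[idKey(42, root)] = packIDs(7, [8]byte{1, 2, 3, 4, 5, 6, 7, 8})

	v, ok := cache[idKey(42, root)]
	fmt.Println(ok, binary.BigEndian.Uint64(v[:8]), v[8:]) // true 7 [1 2 3 4 5 6 7 8]
}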

View File

@@ -3,11 +3,15 @@ package cache
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -34,10 +38,33 @@ func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIn
regCache.lock.RUnlock()
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
if err != nil {
return nil, errors.Wrapf(err, "failed to check registration expiration")
}
if isExpired {
regCache.lock.RUnlock()
regCache.lock.Lock()
defer regCache.lock.Unlock()
delete(regCache.indexToRegistration, id)
log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
regCache.lock.RUnlock()
return v, nil
}
func RegistrationTimeStampExpired(ts uint64) (bool, error) {
// safely convert uint64 to int64
i, err := math.Int(ts)
if err != nil {
return false, err
}
expiryDuration := params.BeaconConfig().RegistrationDuration
// registered time + expiration duration < current time = expired
return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")
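The expiration rule added to RegistrationByIndex reduces to: registration timestamp plus RegistrationDuration lies in the past. A standalone sketch of that check (the three-epoch duration below is an illustrative stand-in for params.BeaconConfig().RegistrationDuration):

package main

import (
	"fmt"
	"time"
)

// registrationExpired reports whether a registration made at unix time ts
// has outlived the allowed duration, matching registered time + ttl < now.
func registrationExpired(ts int64, ttl time.Duration) bool {
	return time.Unix(ts, 0).Add(ttl).Before(time.Now())
}

func main() {
	// Illustrative value only: roughly 3 epochs of 32 slots at 12 seconds each.
	ttl := 3 * 32 * 12 * time.Second

	fresh := time.Now().Add(-ttl / 2).Unix()
	stale := time.Now().Add(-2 * ttl).Unix()
	fmt.Println(registrationExpired(fresh, ttl)) // false
	fmt.Println(registrationExpired(stale, ttl)) // true
}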

View File

@@ -6,12 +6,15 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestRegistrationCache(t *testing.T) {
hook := logTest.NewGlobal()
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex := primitives.ValidatorIndex(1)
@@ -28,14 +31,29 @@ func TestRegistrationCache(t *testing.T) {
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
t.Run("successfully updates", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
t.Run("Registration expired", func(t *testing.T) {
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Unix()),
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
_, err := cache.RegistrationByIndex(validatorIndex2)
require.ErrorContains(t, "no validator registered", err)
require.LogsContain(t, hook, "expired")
})
t.Run("Registration close to expiration still passes", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
@@ -44,3 +62,21 @@ func TestRegistrationCache(t *testing.T) {
require.Equal(t, string(reg.Pubkey), string(pubkey))
})
}
func Test_RegistrationTimeStampExpired(t *testing.T) {
// expiration set at 3 epochs
t.Run("expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, true, isExpired)
})
t.Run("is not expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs -5 seconds
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, false, isExpired)
})
}

View File

@@ -289,13 +289,13 @@ func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *et
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
slotsPerEpoch := cfg.SlotsPerEpoch
sqtRootSlots := cfg.SqrRootSlotsPerEpoch
if matchedSrc && delay <= sqtRootSlots {
participatedFlags[sourceFlagIndex] = true
}
matchedSrcTgt := matchedSrc && matchedTgt
// Before Deneb no attestation should pass validation without having delay <= slotsPerEpoch.
if matchedSrcTgt {
if matchedSrcTgt && delay <= slotsPerEpoch {
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt
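This hunk restores the pre-Deneb rule that the target flag is only earned when the inclusion delay fits within an epoch. For orientation, a standalone sketch of the three timeliness windows; the constants are illustrative mainnet values, and the one-slot head condition comes from the consensus spec rather than from this hunk:

package main

import "fmt"

const (
	slotsPerEpoch     = 32
	sqrtSlotsPerEpoch = 5 // floor of sqrt(32), the mainnet SqrRootSlotsPerEpoch
	minInclusionDelay = 1
)

// participationFlags mirrors the timeliness checks: source within sqrt(SLOTS_PER_EPOCH),
// target within SLOTS_PER_EPOCH, head at exactly MIN_ATTESTATION_INCLUSION_DELAY.
func participationFlags(matchedSource, matchedTarget, matchedHead bool, delay uint64) (source, target, head bool) {
	source = matchedSource && delay <= sqrtSlotsPerEpoch
	target = matchedSource && matchedTarget && delay <= slotsPerEpoch
	head = matchedSource && matchedTarget && matchedHead && delay == minInclusionDelay
	return
}

func main() {
	s, tg, h := participationFlags(true, true, true, 6)
	fmt.Println(s, tg, h) // false true false: too slow for source and head, timely for target
}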

View File

@@ -630,9 +630,6 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
denebState, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, denebState.SetSlot(1))
tests := []struct {
name string
inputState state.BeaconState
@@ -681,34 +678,6 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
targetFlagIndex: true,
},
},
{
name: "participated source and target with delay",
inputState: func() state.BeaconState {
return beaconState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputDelay: params.BeaconConfig().SlotsPerEpoch + 1,
participationIndices: map[uint8]bool{
targetFlagIndex: true,
},
},
{
name: "participated source and target with delay in deneb",
inputState: func() state.BeaconState {
return denebState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputDelay: params.BeaconConfig().SlotsPerEpoch + 1,
participationIndices: map[uint8]bool{
targetFlagIndex: true,
},
},
{
name: "participated source and target and head",
inputState: func() state.BeaconState {
@@ -727,6 +696,7 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
},
},
}
for _, test := range tests {
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
require.NoError(t, err)

View File

@@ -13,15 +13,6 @@ import (
"go.opencensus.io/trace"
)
// AttDelta contains rewards and penalties for a single attestation.
type AttDelta struct {
HeadReward uint64
SourceReward uint64
SourcePenalty uint64
TargetReward uint64
TargetPenalty uint64
}
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
@@ -235,7 +226,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
return beaconState, errors.New("validator registries not the same length as state's validator registries")
}
attDeltas, err := AttestationsDelta(beaconState, bal, vals)
attsRewards, attsPenalties, err := AttestationsDelta(beaconState, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
@@ -246,12 +237,11 @@ func ProcessRewardsAndPenaltiesPrecompute(
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
delta := attDeltas[i]
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], delta.HeadReward+delta.SourceReward+delta.TargetReward)
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
if err != nil {
return nil, err
}
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty)
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
vals[i].AfterEpochTransitionBalance = balances[i]
}
@@ -265,8 +255,10 @@ func ProcessRewardsAndPenaltiesPrecompute(
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) ([]*AttDelta, error) {
attDeltas := make([]*AttDelta, len(vals))
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
cfg := params.BeaconConfig()
prevEpoch := time.PrevEpoch(beaconState)
@@ -280,29 +272,29 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
bias := cfg.InactivityScoreBias
inactivityPenaltyQuotient, err := beaconState.InactivityPenaltyQuotient()
if err != nil {
return nil, err
return nil, nil, err
}
inactivityDenominator := bias * inactivityPenaltyQuotient
for i, v := range vals {
attDeltas[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
if err != nil {
return nil, err
return nil, nil, err
}
}
return attDeltas, nil
return rewards, penalties, nil
}
func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (*AttDelta, error) {
inactivityLeak bool) (reward, penalty uint64, err error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
// Per spec `ActiveCurrentEpoch` can't be 0 to process attestation delta.
if !eligible || bal.ActiveCurrentEpoch == 0 {
return &AttDelta{}, nil
return 0, 0, nil
}
cfg := params.BeaconConfig()
@@ -315,32 +307,32 @@ func attestationDelta(
srcWeight := cfg.TimelySourceWeight
tgtWeight := cfg.TimelyTargetWeight
headWeight := cfg.TimelyHeadWeight
attDelta := &AttDelta{}
reward, penalty = uint64(0), uint64(0)
// Process source reward / penalty
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
attDelta.SourceReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.SourcePenalty += baseReward * srcWeight / weightDenominator
penalty += baseReward * srcWeight / weightDenominator
}
// Process target reward / penalty
if val.IsPrevEpochTargetAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * tgtWeight * (bal.PrevEpochTargetAttested / increment)
attDelta.TargetReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.TargetPenalty += baseReward * tgtWeight / weightDenominator
penalty += baseReward * tgtWeight / weightDenominator
}
// Process head reward / penalty
if val.IsPrevEpochHeadAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * headWeight * (bal.PrevEpochHeadAttested / increment)
attDelta.HeadReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
}
@@ -349,10 +341,10 @@ func attestationDelta(
if !val.IsPrevEpochTargetAttester || val.IsSlashed {
n, err := math.Mul64(effectiveBalance, val.InactivityScore)
if err != nil {
return &AttDelta{}, err
return 0, 0, err
}
attDelta.TargetPenalty += n / inactivityDenominator
penalty += n / inactivityDenominator
}
return attDelta, nil
return reward, penalty, nil
}
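The revert above changes only how the deltas are returned; the per-validator arithmetic keeps the Altair shape: an earned flag pays base_reward * weight * attested_increments / (active_increments * WEIGHT_DENOMINATOR) outside an inactivity leak, and a missed source or target flag costs base_reward * weight / WEIGHT_DENOMINATOR. A compact sketch with the mainnet Altair weights as illustrative values; the additional inactivity-score penalty for missed targets is omitted:

package main

import "fmt"

const (
	weightDenominator = 64
	srcWeight         = 14
	tgtWeight         = 26
	headWeight        = 14
	increment         = 1_000_000_000 // EFFECTIVE_BALANCE_INCREMENT in Gwei
)

// attestationDelta sketches the per-validator accounting shape from the hunk above.
func attestationDelta(baseReward, prevSourceBal, prevTargetBal, prevHeadBal, activeBal uint64,
	gotSource, gotTarget, gotHead, leaking bool) (reward, penalty uint64) {
	activeIncrements := activeBal / increment
	pay := func(weight, attestedBal uint64) uint64 {
		if leaking {
			return 0 // no attestation rewards while the chain is not finalizing
		}
		return baseReward * weight * (attestedBal / increment) / (activeIncrements * weightDenominator)
	}
	if gotSource {
		reward += pay(srcWeight, prevSourceBal)
	} else {
		penalty += baseReward * srcWeight / weightDenominator
	}
	if gotTarget {
		reward += pay(tgtWeight, prevTargetBal)
	} else {
		penalty += baseReward * tgtWeight / weightDenominator
	}
	if gotHead {
		reward += pay(headWeight, prevHeadBal) // a missed head flag is never penalized
	}
	return reward, penalty
}

func main() {
	r, p := attestationDelta(20_000, 9_000_000_000_000, 9_000_000_000_000, 8_000_000_000_000, 10_000_000_000_000,
		true, true, false, false)
	fmt.Println(r, p)
}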

View File

@@ -213,16 +213,9 @@ func TestAttestationsDelta(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -251,16 +244,9 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -299,15 +285,8 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
}
wanted := make([]uint64, s.NumValidators())
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
for i := range rewards {
wanted[i] += rewards[i]
}

View File

@@ -2,7 +2,6 @@ package altair
import (
"context"
goErrors "errors"
"fmt"
"time"
@@ -23,10 +22,6 @@ import (
const maxRandomByte = uint64(1<<8 - 1)
var (
ErrTooLate = errors.New("sync message is too late")
)
// ValidateNilSyncContribution validates the following fields are not nil:
// -the contribution and proof itself
// -the message within contribution and proof
@@ -195,7 +190,6 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}
// ValidateSyncMessageTime validates sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
return err
@@ -223,19 +217,15 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
upperBound := time.Now().Add(clockDisparity)
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
syncErr := fmt.Errorf(
"sync message time %v (message slot %d) not within allowable range of %v to %v (current slot %d)",
return fmt.Errorf(
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
messageTime,
slot,
lowerBound,
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
upperBound,
currentSlot,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {
syncErr = goErrors.Join(ErrTooLate, syncErr)
}
return syncErr
}
return nil
}
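The error-message change above does not alter the validation itself, which is roughly: the slot's start time must fall inside [now - one slot - MAXIMUM_GOSSIP_CLOCK_DISPARITY, now + MAXIMUM_GOSSIP_CLOCK_DISPARITY]. A standalone sketch of that window (12-second slots and a 500 ms disparity are illustrative values):

package main

import (
	"fmt"
	"time"
)

// validSyncMessageTime reports whether a message for the given slot arrives within
// the allowed window around the current time.
func validSyncMessageTime(slot uint64, genesis time.Time, secondsPerSlot uint64, disparity time.Duration) bool {
	messageTime := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	lower := time.Now().Add(-time.Duration(secondsPerSlot)*time.Second - disparity)
	upper := time.Now().Add(disparity)
	return !messageTime.Before(lower) && !messageTime.After(upper)
}

func main() {
	genesis := time.Now().Add(-100 * 12 * time.Second) // the current slot is 100
	fmt.Println(validSyncMessageTime(100, genesis, 12, 500*time.Millisecond)) // true
	fmt.Println(validSyncMessageTime(97, genesis, 12, 500*time.Millisecond))  // false: too old
}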

View File

@@ -311,7 +311,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 16,
genesisTime: prysmTime.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
},
wantedErr: "(message slot 16) not within allowable range of",
wantedErr: "(slot 16) not within allowable range of",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
@@ -327,7 +327,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 100,
genesisTime: prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
},
wantedErr: "(message slot 100) not within allowable range of",
wantedErr: "(slot 100) not within allowable range of",
},
{
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
@@ -343,7 +343,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 101,
genesisTime: prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "(message slot 101) not within allowable range of",
wantedErr: "(slot 101) not within allowable range of",
},
{
name: "sync_message.slot is well beyond current slot",

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"go.opencensus.io/trace"
)
@@ -82,6 +81,7 @@ func VerifyAttestationNoVerifySignature(
s := att.Data.Slot
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
if !minInclusionCheck {
return fmt.Errorf(
"attestation slot %d + inclusion delay %d > state slot %d",
@@ -90,17 +90,13 @@ func VerifyAttestationNoVerifySignature(
beaconState.Slot(),
)
}
if beaconState.Version() < version.Deneb {
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
if !epochInclusionCheck {
return fmt.Errorf(
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
beaconState.Slot(),
s,
params.BeaconConfig().SlotsPerEpoch,
)
}
if !epochInclusionCheck {
return fmt.Errorf(
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
beaconState.Slot(),
s,
params.BeaconConfig().SlotsPerEpoch,
)
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.Data.Target.Epoch)
if err != nil {
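The bounds reinstated here form the classic inclusion window: an attestation for slot s may only be included in a block whose slot lies in [s + MIN_ATTESTATION_INCLUSION_DELAY, s + SLOTS_PER_EPOCH]. A tiny sketch of that check with illustrative mainnet constants:

package main

import "fmt"

const (
	minInclusionDelay = 1
	slotsPerEpoch     = 32
)

// canInclude reports whether an attestation made at attSlot may be included
// in a block at stateSlot under the pre-Deneb window.
func canInclude(attSlot, stateSlot uint64) bool {
	return attSlot+minInclusionDelay <= stateSlot && stateSlot <= attSlot+slotsPerEpoch
}

func main() {
	fmt.Println(canInclude(10, 11)) // true: earliest allowed slot
	fmt.Println(canInclude(10, 43)) // false: more than an epoch old
}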

View File

@@ -127,44 +127,6 @@ func TestProcessAttestationsNoVerify_OK(t *testing.T) {
assert.NoError(t, err)
}
func TestProcessAttestationsNoVerify_OlderThanSlotsPerEpoch(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(1, true)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
}
ctx := context.Background()
t.Run("attestation older than slots per epoch", func(t *testing.T) {
beaconState, _ := util.DeterministicGenesisState(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
require.ErrorContains(t, "state slot 33 > attestation slot 0 + SLOTS_PER_EPOCH 32", blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att))
})
t.Run("attestation older than slots per epoch in deneb", func(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateDeneb(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
require.NoError(t, blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att))
})
}
func TestVerifyAttestationNoVerifySignature_OK(t *testing.T) {
// Attestation with an empty signature

View File

@@ -413,7 +413,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
}
}
func TestFuzzVerifyExit_10000(t *testing.T) {
func TestFuzzVerifyExit_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ve := &ethpb.SignedVoluntaryExit{}
rawVal := &ethpb.Validator{}
@@ -425,18 +425,9 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
fuzzer.Fuzz(rawVal)
fuzzer.Fuzz(fork)
fuzzer.Fuzz(&slot)
state := &ethpb.BeaconState{
Slot: slot,
Fork: fork,
GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
}
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
val, err := state_native.NewValidator(&ethpb.Validator{})
_ = err
err = VerifyExitAndSignature(val, s, ve)
err = VerifyExitAndSignature(val, slot, fork, ve, params.BeaconConfig().ZeroHash[:])
_ = err
}
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -59,7 +58,7 @@ func ProcessVoluntaryExits(
if err != nil {
return nil, err
}
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)
@@ -93,27 +92,15 @@ func ProcessVoluntaryExits(
// initiate_validator_exit(state, voluntary_exit.validator_index)
func VerifyExitAndSignature(
validator state.ReadOnlyValidator,
state state.ReadOnlyBeaconState,
currentSlot primitives.Slot,
fork *ethpb.Fork,
signed *ethpb.SignedVoluntaryExit,
genesisRoot []byte,
) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
currentSlot := state.Slot()
fork := state.Fork()
genesisRoot := state.GenesisValidatorsRoot()
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
// This allows for signed validator exits to be valid forever.
if state.Version() >= version.Deneb {
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
}
exit := signed.Exit
if err := verifyExitConditions(validator, currentSlot, exit); err != nil {
return err
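With this revert the exit's signing domain is built from an explicitly passed fork and genesis validators root instead of a full state. The fork-version choice feeding that domain is an epoch comparison; a standalone sketch of the spec behaviour (not Prysm's exact helper):

package main

import "fmt"

type fork struct {
	previousVersion [4]byte
	currentVersion  [4]byte
	epoch           uint64
}

// forkVersionAt picks the version used to build a signing domain for a message
// scoped to the given epoch: the previous version before the fork epoch,
// the current version from the fork epoch onward.
func forkVersionAt(f fork, epoch uint64) [4]byte {
	if epoch < f.epoch {
		return f.previousVersion
	}
	return f.currentVersion
}

func main() {
	f := fork{previousVersion: [4]byte{0, 0, 0, 0}, currentVersion: [4]byte{1, 0, 0, 0}, epoch: 10}
	fmt.Printf("%x\n", forkVersionAt(f, 9))  // 00000000
	fmt.Printf("%x\n", forkVersionAt(f, 10)) // 01000000
}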

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -134,38 +133,38 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
}
func TestVerifyExitAndSignature(t *testing.T) {
type args struct {
currentSlot primitives.Slot
}
tests := []struct {
name string
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error)
args args
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error)
wantErr string
}{
{
name: "Empty Exit",
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
args: args{
currentSlot: 0,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
}
genesisRoot := [32]byte{'a'}
st := &ethpb.BeaconState{
Slot: 0,
Fork: fork,
GenesisValidatorsRoot: genesisRoot[:],
}
s, err := state_native.InitializeFromProtoUnsafePhase0(st)
if err != nil {
return nil, nil, nil, err
}
return &ethpb.Validator{}, &ethpb.SignedVoluntaryExit{}, s, nil
return &ethpb.Validator{}, &ethpb.SignedVoluntaryExit{}, fork, genesisRoot[:], nil
},
wantErr: "nil exit",
},
{
name: "Happy Path",
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
args: args{
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
@@ -187,18 +186,15 @@ func TestVerifyExitAndSignature(t *testing.T) {
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
if err := bs.SetFork(fork); err != nil {
return nil, nil, nil, err
}
if err := bs.SetSlot((params.BeaconConfig().SlotsPerEpoch * 2) + 1); err != nil {
return nil, nil, nil, err
}
return validator, signedExit, bs, nil
return validator, signedExit, fork, bs.GenesisValidatorsRoot(), nil
},
},
{
name: "bad signature",
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
args: args{
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
@@ -219,72 +215,27 @@ func TestVerifyExitAndSignature(t *testing.T) {
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
if err := bs.SetFork(fork); err != nil {
return nil, nil, nil, err
}
if err := bs.SetSlot((params.BeaconConfig().SlotsPerEpoch * 2) + 1); err != nil {
return nil, nil, nil, err
}
// use wrong genesis root and don't update validator
genesisRoot := [32]byte{'a'}
if err := bs.SetGenesisValidatorsRoot(genesisRoot[:]); err != nil {
return nil, nil, nil, err
}
return validator, signedExit, bs, nil
// use wrong genesis root and don't update validator
return validator, signedExit, fork, genesisRoot[:], nil
},
wantErr: "signature did not verify",
},
{
name: "EIP-7044: deneb exits should verify with capella fork information",
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: primitives.Epoch(2),
}
signedExit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 2,
ValidatorIndex: 0,
},
}
bs, keys := util.DeterministicGenesisState(t, 1)
bs, err := state_native.InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
GenesisValidatorsRoot: bs.GenesisValidatorsRoot(),
Fork: fork,
Slot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
Validators: bs.Validators(),
})
if err != nil {
return nil, nil, nil, err
}
validator := bs.Validators()[0]
validator.ActivationEpoch = 1
err = bs.UpdateValidatorAtIndex(0, validator)
require.NoError(t, err)
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
return validator, signedExit, bs, nil
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := params.BeaconConfig().ShardCommitteePeriod
params.BeaconConfig().ShardCommitteePeriod = 0
validator, signedExit, st, err := tt.setup()
validator, signedExit, fork, genesisRoot, err := tt.setup()
require.NoError(t, err)
rvalidator, err := state_native.NewValidator(validator)
require.NoError(t, err)
err = blocks.VerifyExitAndSignature(
rvalidator,
st,
tt.args.currentSlot,
fork,
signedExit,
genesisRoot,
)
if tt.wantErr == "" {
require.NoError(t, err)

View File

@@ -145,40 +145,6 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateDeneb:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyDeneb{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}

View File

@@ -61,7 +61,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
return false, nil
case err != nil:
return false, err
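The rename above reflects the sentinel-error pattern used by the block wrappers: fields that do not exist on an older fork return a sentinel, and callers treat it as "no payload" rather than a failure. A generic sketch of that pattern with errors.Is (the sentinel and types here are illustrative):

package main

import (
	"errors"
	"fmt"
)

// errUnsupportedGetter stands in for a field getter that does not exist on an older fork.
var errUnsupportedGetter = errors.New("unsupported getter")

type body struct{ payload *string }

func (b body) Execution() (string, error) {
	if b.payload == nil {
		return "", errUnsupportedGetter
	}
	return *b.payload, nil
}

// isExecutionBlock treats the sentinel as "false, no error" and propagates anything else.
func isExecutionBlock(b body) (bool, error) {
	p, err := b.Execution()
	switch {
	case errors.Is(err, errUnsupportedGetter):
		return false, nil
	case err != nil:
		return false, err
	}
	return p != "", nil
}

func main() {
	fmt.Println(isExecutionBlock(body{})) // false <nil>
	payload := "0xabc"
	fmt.Println(isExecutionBlock(body{payload: &payload})) // true <nil>
}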

View File

@@ -1,30 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["upgrade.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["upgrade_test.go"],
deps = [
":go_default_library",
"//beacon-chain/core/time:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -1,115 +0,0 @@
package deneb
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// UpgradeToDeneb upgrades the input state and returns the corresponding Deneb-version state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := state.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := state.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := state.HistoricalSummaries()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: [][]byte{},
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessBlobGas: 0,
BlobGasUsed: 0,
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
}
return state_native.InitializeFromProtoUnsafeDeneb(s)
}

View File

@@ -1,94 +0,0 @@
package deneb_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestUpgradeToDeneb(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
mSt, err := deneb.UpgradeToDeneb(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
numValidators := mSt.NumValidators()
p, err := mSt.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
p, err = mSt.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
s, err := mSt.InactivityScores()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
f := mSt.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: time.CurrentEpoch(st),
}, f)
csc, err := mSt.CurrentSyncCommittee()
require.NoError(t, err)
psc, err := preForkState.CurrentSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, csc)
nsc, err := mSt.NextSyncCommittee()
require.NoError(t, err)
psc, err = preForkState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, nsc)
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
txRoot, err := prevHeader.TransactionsRoot()
require.NoError(t, err)
wdRoot, err := prevHeader.WithdrawalsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),
ReceiptsRoot: prevHeader.ReceiptsRoot(),
LogsBloom: prevHeader.LogsBloom(),
PrevRandao: prevHeader.PrevRandao(),
BlockNumber: prevHeader.BlockNumber(),
GasLimit: prevHeader.GasLimit(),
GasUsed: prevHeader.GasUsed(),
Timestamp: prevHeader.Timestamp(),
BaseFeePerGas: prevHeader.BaseFeePerGas(),
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
}
require.DeepEqual(t, wanted, protoHeader)
}

View File

@@ -39,8 +39,6 @@ type BlockProcessedData struct {
SignedBlock interfaces.ReadOnlySignedBeaconBlock
// Verified is true if the block's BLS contents have been verified.
Verified bool
// Optimistic is true if the block is optimistic.
Optimistic bool
}
// ChainStartedData is the data sent with ChainStarted events.

View File

@@ -38,7 +38,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -69,6 +68,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,15 +8,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
)
var (
ErrTooLate = errors.New("attestation is too late")
)
// ValidateNilAttestation checks if any composite field of input attestation is nil.
@@ -70,6 +66,25 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
for i := 0; i < len(sigs); i++ {
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
if err != nil {
return nil, err
}
}
return bls.AggregateSignatures(sigs), nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation *ethpb.Attestation) bool {
@@ -167,33 +182,14 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
lowerBoundsSlot,
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooLateCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooEarlyCount.Inc()
return attError
}
attEpoch := slots.ToEpoch(attSlot)
if attEpoch < params.BeaconConfig().DenebForkEpoch {
if attTime.Before(lowerBounds) {
attReceivedTooLateCount.Inc()
return errors.Join(ErrTooLate, attError)
}
return nil
}
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
currentEpoch := slots.ToEpoch(currentSlot)
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
return fmt.Errorf("attestation slot %d not within current epoch %d or previous epoch %d", attSlot, currentEpoch, prevEpoch)
}
return nil
}

View File

@@ -10,6 +10,8 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -43,6 +45,44 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
}
func TestAttestation_AggregateSignature(t *testing.T) {
t.Run("verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
require.NoError(t, err)
assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify")
})
t.Run("not verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg)
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
require.NoError(t, err)
assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)), "Signature is not supposed to verify")
})
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
// Create 10 committees
committeeCount := uint64(10)
@@ -85,11 +125,6 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
}
func Test_ValidateAttestationTime(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.SetupTestConfigCleanup(t)
if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
}
@@ -160,39 +195,6 @@ func Test_ValidateAttestationTime(t *testing.T) {
).Add(200 * time.Millisecond),
},
},
{
name: "attestation.slot < current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange - 1,
genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
},
{
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
},
{
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE, received 200ms late in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: prysmTime.Now().Add(
-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
},
{
name: "attestation.slot != current epoch or previous epoch in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: prysmTime.Now().Add(
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation slot 268 not within current epoch 15 or previous epoch 14",
},
{
name: "attestation.slot is well beyond current slot",
args: args{

View File

@@ -295,58 +295,61 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
}
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for the input epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, e primitives.Epoch) error {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
for _, e := range []primitives.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
return nil
}
// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
// Input state is used to retrieve active validator indices.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState) error {
// The cache uses the state root at the (current epoch - 1)'s slot as key. (e.g. for epoch 2, the key is root at slot 63)
// Which is the reason why we skip genesis epoch.
if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
if time.CurrentEpoch(state) <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
// Use state root from (current_epoch - 1))
s, err := slots.EpochEnd(epoch - 1)
wantedEpoch := time.PrevEpoch(state)
s, err := slots.EpochEnd(wantedEpoch)
if err != nil {
return err
}
r, err := state.StateRootAtIndex(uint64(s % params.BeaconConfig().SlotsPerHistoricalRoot))
r, err := StateRootAtSlot(state, s)
if err != nil {
return err
}
@@ -363,11 +366,11 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
return nil
}
indices, err := ActiveValidatorIndices(ctx, state, epoch)
indices, err := ActiveValidatorIndices(ctx, state, time.CurrentEpoch(state))
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
proposerIndices, err := precomputeProposerIndices(state, indices)
if err != nil {
return err
}
@@ -429,10 +432,11 @@ func computeCommittee(
// This computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
e := time.CurrentEpoch(state)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return nil, errors.Wrap(err, "could not generate seed")
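The reworked UpdateCommitteeCache warms entries for the requested epoch and the next one, keyed by the attester-domain seed, and skips seeds that are already present. A minimal sketch of that loop over a toy cache (the seed derivation below is a stand-in for the real Seed helper):

package main

import (
	"crypto/sha256"
	"fmt"
)

type committeeCache map[[32]byte][]uint64

// seedFor stands in for helpers.Seed: the real seed mixes the randao mix with the
// attester domain; here it is just a hash of the epoch number.
func seedFor(epoch uint64) [32]byte {
	return sha256.Sum256([]byte(fmt.Sprintf("attester-seed-%d", epoch)))
}

// warm caches shuffled indices for epoch and epoch+1, skipping seeds already cached.
func warm(c committeeCache, epoch uint64, shuffle func(uint64) []uint64) {
	for _, e := range []uint64{epoch, epoch + 1} {
		seed := seedFor(e)
		if _, ok := c[seed]; ok {
			continue
		}
		c[seed] = shuffle(e)
	}
}

func main() {
	c := committeeCache{}
	shuffle := func(e uint64) []uint64 { return []uint64{e, e + 1, e + 2} } // placeholder shuffle
	warm(c, 5, shuffle)
	fmt.Println(len(c)) // 2: entries for epoch 5 and epoch 6
}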

View File

@@ -413,7 +413,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(0)
epoch := primitives.Epoch(1)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
@@ -423,40 +423,6 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)
for i := primitives.ValidatorIndex(0); uint64(i) < validatorCount; i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 1,
}
indices[i] = i
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 300000)
for i := 0; i < len(validators); i++ {
@@ -673,7 +639,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
proposerIndices, err := precomputeProposerIndices(state, indices)
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex

View File

@@ -184,7 +184,7 @@ func innerShuffleList(input []primitives.ValidatorIndex, seed [32]byte, shuffle
for {
buf[seedSize] = r
ph := hashfunc(buf[:pivotViewSize])
pivot := binary.LittleEndian.Uint64(ph[:8]) % listSize
pivot := bytesutil.FromBytes8(ph[:8]) % listSize
mirror := (pivot + 1) >> 1
binary.LittleEndian.PutUint32(buf[pivotViewSize:], uint32(pivot>>8))
source := hashfunc(buf)
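The one-line change above swaps a helper for reading the pivot bytes. For context, each round of the swap-or-not shuffle derives its pivot from the first eight bytes of hash(seed plus round byte), read little-endian and reduced modulo the list size; a standalone sketch:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// pivotForRound derives the swap-or-not pivot for one shuffling round:
// the first 8 bytes of H(seed || round) taken little-endian, mod listSize.
func pivotForRound(seed [32]byte, round uint8, listSize uint64) uint64 {
	buf := append(seed[:], round)
	h := sha256.Sum256(buf)
	return binary.LittleEndian.Uint64(h[:8]) % listSize
}

func main() {
	var seed [32]byte
	copy(seed[:], "example-seed")
	fmt.Println(pivotForRound(seed, 0, 1000))
}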

View File

@@ -17,7 +17,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var CommitteeCacheInProgressHit = promauto.NewCounter(prometheus.CounterOpts{
@@ -262,7 +261,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
if err := UpdateProposerIndicesInCache(ctx, state, time.CurrentEpoch(state)); err != nil {
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
}
@@ -397,22 +396,3 @@ func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finali
return activationEligibilityEpoch <= finalizedEpoch &&
activationEpoch == params.BeaconConfig().FarFutureEpoch
}
// LastActivatedValidatorIndex provides the last activated validator given a state
func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconState) (primitives.ValidatorIndex, error) {
_, span := trace.StartSpan(ctx, "helpers.LastActivatedValidatorIndex")
defer span.End()
var lastActivatedvalidatorIndex primitives.ValidatorIndex
// linear search because statuses are not sorted
for j := st.NumValidators() - 1; j >= 0; j-- {
val, err := st.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(j))
if err != nil {
return 0, err
}
if IsActiveValidatorUsingTrie(val, time.CurrentEpoch(st)) {
lastActivatedvalidatorIndex = primitives.ValidatorIndex(j)
break
}
}
return lastActivatedvalidatorIndex, nil
}

View File

@@ -727,26 +727,3 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}
}
}
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
validators := make([]*ethpb.Validator, 4)
balances := make([]uint64, len(validators))
for i := uint64(0); i < 4; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 32 * 1e9,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
balances[i] = validators[i].EffectiveBalance
}
require.NoError(t, beaconState.SetValidators(validators))
require.NoError(t, beaconState.SetBalances(balances))
index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, index, primitives.ValidatorIndex(3))
}

View File

@@ -16,7 +16,6 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
],

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
// ForkVersionByteLength length of fork version byte array.
@@ -57,18 +56,7 @@ const (
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
fork := st.Fork()
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
// This allows for signed validator exits to be valid forever.
if st.Version() >= version.Deneb && domain == params.BeaconConfig().DomainVoluntaryExit {
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
}
d, err := Domain(fork, epoch, domain, st.GenesisValidatorsRoot())
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
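After this revert, ComputeDomainAndSign always derives the domain from the state's own fork. That domain is the 4-byte domain type followed by the first 28 bytes of the fork-data root; a standalone sketch of the composition (the subsequent signing-root computation and BLS signature are omitted):

package main

import (
	"crypto/sha256"
	"fmt"
)

// forkDataRoot is the SSZ hash tree root of ForkData(version, genesisValidatorsRoot):
// two 32-byte chunks (the 4-byte version right-padded) hashed together.
func forkDataRoot(version [4]byte, genesisRoot [32]byte) [32]byte {
	var chunk [64]byte
	copy(chunk[:4], version[:])
	copy(chunk[32:], genesisRoot[:])
	return sha256.Sum256(chunk[:])
}

// computeDomain builds the 32-byte signing domain: the 4-byte domain type followed by
// the first 28 bytes of the fork-data root.
func computeDomain(domainType [4]byte, version [4]byte, genesisRoot [32]byte) [32]byte {
	root := forkDataRoot(version, genesisRoot)
	var d [32]byte
	copy(d[:4], domainType[:])
	copy(d[4:], root[:28])
	return d
}

func main() {
	domainVoluntaryExit := [4]byte{0x04, 0x00, 0x00, 0x00}
	var genesisRoot [32]byte
	d := computeDomain(domainVoluntaryExit, [4]byte{0, 0, 0, 0}, genesisRoot)
	fmt.Printf("%x\n", d)
}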

Some files were not shown because too many files have changed in this diff.