mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 06:18:05 -05:00
Compare commits
1 Commits
mac-zig-fi
...
deadline_d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c65330edea |
2
.bazelrc
2
.bazelrc
@@ -15,7 +15,7 @@ coverage --define=coverage_enabled=1
|
||||
# Stamp binaries with git information
|
||||
build --workspace_status_command=./hack/workspace_status.sh
|
||||
|
||||
build --define blst_disabled=false
|
||||
build --define blst_disabled=false --define blst_modern=true
|
||||
run --define blst_disabled=false
|
||||
|
||||
build:blst_disabled --define blst_disabled=true
|
||||
|
||||
@@ -12,8 +12,7 @@
|
||||
#build:remote-cache --disk_cache=
|
||||
build:remote-cache --remote_download_toplevel
|
||||
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
|
||||
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
|
||||
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --remote_local_fallback
|
||||
build:remote-cache --experimental_remote_cache_async
|
||||
build:remote-cache --experimental_remote_merkle_tree_cache
|
||||
|
||||
8
.github/workflows/go.yml
vendored
8
.github/workflows/go.yml
vendored
@@ -75,14 +75,14 @@ jobs:
|
||||
- name: Build
|
||||
# Use blst tag to allow go and bazel builds for blst.
|
||||
run: go build -v ./...
|
||||
env:
|
||||
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"
|
||||
env:
|
||||
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
|
||||
# fuzz leverage go tag based stubs at compile time.
|
||||
# Building and testing with these tags should be checked and enforced at pre-submit.
|
||||
- name: Test for fuzzing
|
||||
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
|
||||
env:
|
||||
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"
|
||||
env:
|
||||
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
|
||||
|
||||
# Tests run via Bazel for now...
|
||||
# - name: Test
|
||||
|
||||
87
WORKSPACE
87
WORKSPACE
@@ -18,10 +18,10 @@ rules_pkg_dependencies()
|
||||
|
||||
http_archive(
|
||||
name = "hermetic_cc_toolchain",
|
||||
sha256 = "ff33e57a2de66f44b5daf7dff645a78eb7e3e320a5f0cda23f0ef49c222073c5",
|
||||
strip_prefix = "hermetic_cc_toolchain-3a0b2fe448d23195abeccae533eac798dc333b09",
|
||||
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
|
||||
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
|
||||
urls = [
|
||||
"https://github.com/uber/hermetic_cc_toolchain/archive/3a0b2fe448d23195abeccae533eac798dc333b09.tar.gz", # 2023-10-16
|
||||
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
|
||||
],
|
||||
)
|
||||
|
||||
@@ -50,6 +50,8 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool
|
||||
|
||||
configure_prysm_toolchains()
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "bazel_skylib",
|
||||
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
|
||||
@@ -65,10 +67,10 @@ bazel_skylib_workspace()
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
|
||||
sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -85,24 +87,6 @@ http_archive(
|
||||
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "rules_oci",
|
||||
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
|
||||
strip_prefix = "rules_oci-1.3.4",
|
||||
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
|
||||
)
|
||||
|
||||
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
|
||||
|
||||
rules_oci_dependencies()
|
||||
|
||||
load("@rules_oci//oci:repositories.bzl", "LATEST_CRANE_VERSION", "oci_register_toolchains")
|
||||
|
||||
oci_register_toolchains(
|
||||
name = "oci",
|
||||
crane_version = LATEST_CRANE_VERSION,
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
patch_args = ["-p1"],
|
||||
@@ -110,10 +94,10 @@ http_archive(
|
||||
# Expose internals of go_test for custom build transitions.
|
||||
"//third_party:io_bazel_rules_go_test.patch",
|
||||
],
|
||||
sha256 = "91585017debb61982f7054c9688857a2ad1fd823fc3f9cb05048b0025c47d023",
|
||||
sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -183,30 +167,12 @@ container_pull(
|
||||
repository = "pinglamb/alpine-glibc",
|
||||
)
|
||||
|
||||
load("@rules_oci//oci:pull.bzl", "oci_pull")
|
||||
|
||||
# A multi-arch base image
|
||||
oci_pull(
|
||||
name = "linux_debian11_multiarch_base", # Debian bullseye
|
||||
digest = "sha256:9b8e0854865dcaf49470b4ec305df45957020fbcf17b71eeb50ffd3bc5bf885d", # 2023-05-17
|
||||
image = "gcr.io/distroless/cc-debian11",
|
||||
platforms = [
|
||||
"linux/amd64",
|
||||
"linux/arm64",
|
||||
],
|
||||
reproducible = True,
|
||||
)
|
||||
|
||||
load("@prysm//tools:image_deps.bzl", "prysm_image_deps")
|
||||
|
||||
prysm_image_deps()
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
|
||||
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.20.10",
|
||||
go_version = "1.20.7",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
@@ -247,7 +213,7 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.4.0-beta.3"
|
||||
consensus_spec_version = "v1.4.0-beta.1"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -263,7 +229,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "67ae5b8fc368853da23d4297e480a4b7f4722fb970d1c7e2b6a5b7faef9cb907",
|
||||
sha256 = "24399b60ce3fbeb2311952d213dc3731b6dcb0f8881b016c283de5b518d2bbba",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -279,7 +245,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "82474f29fff4abd09fb1e71bafa98827e2573cf0ad02cf119610961831dc3bb5",
|
||||
sha256 = "8e656ee48d2e2ebc9cf9baedb81f27925bc625b3e3fbb2883444a08758a5884a",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -295,7 +261,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "60e4b6eb6c341daab7ee5614a8e3f28567247c504c593b951bfe919622c8ef8f",
|
||||
sha256 = "8bd137da6cc57a25383bfac5bc37e31265098145278bd8002b88e24c8b4718b9",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -310,7 +276,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "fdab9756c93a250219ff6a10d5a9faee1e2e6878a14508410409e307362c6991",
|
||||
sha256 = "2bc1edb6e4a4f86c00317c04618a90b0ca29ee1eba833d0a64dd67fdd83fdbe3",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
@@ -371,23 +337,10 @@ http_archive(
|
||||
],
|
||||
)
|
||||
|
||||
# Group the sources of the library so that CMake rule have access to it
|
||||
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
|
||||
|
||||
# External dependencies
|
||||
http_archive(
|
||||
name = "googleapis",
|
||||
sha256 = "9d1a930e767c93c825398b8f8692eca3fe353b9aaadedfbcf1fca2282c85df88",
|
||||
strip_prefix = "googleapis-64926d52febbf298cb82a8f472ade4a3969ba922",
|
||||
urls = [
|
||||
"https://github.com/googleapis/googleapis/archive/64926d52febbf298cb82a8f472ade4a3969ba922.zip",
|
||||
],
|
||||
)
|
||||
|
||||
load("@googleapis//:repository_rules.bzl", "switched_rules_by_language")
|
||||
|
||||
switched_rules_by_language(
|
||||
name = "com_google_googleapis_imports",
|
||||
go = True,
|
||||
)
|
||||
|
||||
load("//:deps.bzl", "prysm_deps")
|
||||
|
||||
# gazelle:repository_macro deps.bzl%prysm_deps
|
||||
|
||||
@@ -13,8 +13,6 @@ go_library(
|
||||
"//api/client:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/eth/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
"text/template"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
|
||||
@@ -150,14 +148,14 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
|
||||
}
|
||||
fr := &shared.Fork{}
|
||||
dataWrapper := &struct{ Data *shared.Fork }{Data: fr}
|
||||
fr := &forkResponse{}
|
||||
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
|
||||
err = json.Unmarshal(body, dataWrapper)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error decoding json response in GetFork")
|
||||
}
|
||||
|
||||
return fr.ToConsensus()
|
||||
return fr.Fork()
|
||||
}
|
||||
|
||||
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
|
||||
@@ -285,7 +283,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
|
||||
|
||||
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
|
||||
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
|
||||
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
|
||||
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
|
||||
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
|
||||
body, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
@@ -324,12 +322,12 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shar
|
||||
|
||||
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
|
||||
// Returns a struct representation of json response.
|
||||
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
|
||||
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
|
||||
body, err := c.Get(ctx, changeBLStoExecutionPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
|
||||
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
|
||||
err = json.Unmarshal(body, poolResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -337,8 +335,40 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExe
|
||||
return poolResponse, nil
|
||||
}
|
||||
|
||||
type forkResponse struct {
|
||||
PreviousVersion string `json:"previous_version"`
|
||||
CurrentVersion string `json:"current_version"`
|
||||
Epoch string `json:"epoch"`
|
||||
}
|
||||
|
||||
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
|
||||
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cSlice, err := hexutil.Decode(f.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
|
||||
}
|
||||
pSlice, err := hexutil.Decode(f.PreviousVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
|
||||
}
|
||||
return ðpb.Fork{
|
||||
CurrentVersion: cSlice,
|
||||
PreviousVersion: pSlice,
|
||||
Epoch: primitives.Epoch(epoch),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type forkScheduleResponse struct {
|
||||
Data []shared.Fork
|
||||
Data []forkResponse
|
||||
}
|
||||
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
|
||||
|
||||
@@ -54,6 +54,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/authorization"
|
||||
@@ -270,13 +269,9 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
vs := make([]*shared.SignedValidatorRegistration, len(svr))
|
||||
vs := make([]*SignedValidatorRegistration, len(svr))
|
||||
for i := 0; i < len(svr); i++ {
|
||||
svrJson, err := shared.SignedValidatorRegistrationFromConsensus(svr[i])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("failed to encode to SignedValidatorRegistration at index %d", i))
|
||||
}
|
||||
vs[i] = svrJson
|
||||
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
|
||||
}
|
||||
body, err := json.Marshal(vs)
|
||||
if err != nil {
|
||||
@@ -301,14 +296,12 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(ðpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
|
||||
}
|
||||
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
|
||||
}
|
||||
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
|
||||
}
|
||||
@@ -338,14 +331,12 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
|
||||
}
|
||||
b, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(ðpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
|
||||
}
|
||||
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
|
||||
body, err := json.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
|
||||
}
|
||||
|
||||
versionOpt := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
|
||||
}
|
||||
|
||||
@@ -876,7 +876,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
SyncCommitteeBits: ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
|
||||
SyncCommitteeBits: bitfield.Bitvector512(ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458")),
|
||||
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -16,6 +16,87 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SignedValidatorRegistration a struct for signed validator registrations.
|
||||
type SignedValidatorRegistration struct {
|
||||
*eth.SignedValidatorRegistrationV1
|
||||
}
|
||||
|
||||
// ValidatorRegistration a struct for validator registrations.
|
||||
type ValidatorRegistration struct {
|
||||
*eth.ValidatorRegistrationV1
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json representation copy of signed validator registration.
|
||||
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *ValidatorRegistration `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{
|
||||
Message: &ValidatorRegistration{r.Message},
|
||||
Signature: r.SignedValidatorRegistrationV1.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON returns a byte representation of signed validator registration from json.
|
||||
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
if r.SignedValidatorRegistrationV1 == nil {
|
||||
r.SignedValidatorRegistrationV1 = ð.SignedValidatorRegistrationV1{}
|
||||
}
|
||||
o := struct {
|
||||
Message *ValidatorRegistration `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{}
|
||||
if err := json.Unmarshal(b, &o); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Message = o.Message.ValidatorRegistrationV1
|
||||
r.Signature = o.Signature
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json representation copy of validator registration.
|
||||
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}{
|
||||
FeeRecipient: r.FeeRecipient,
|
||||
GasLimit: fmt.Sprintf("%d", r.GasLimit),
|
||||
Timestamp: fmt.Sprintf("%d", r.Timestamp),
|
||||
Pubkey: r.Pubkey,
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON returns a byte representation of validator registration from json.
|
||||
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
if r.ValidatorRegistrationV1 == nil {
|
||||
r.ValidatorRegistrationV1 = ð.ValidatorRegistrationV1{}
|
||||
}
|
||||
o := struct {
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}{}
|
||||
if err := json.Unmarshal(b, &o); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.FeeRecipient = o.FeeRecipient
|
||||
r.Pubkey = o.Pubkey
|
||||
var err error
|
||||
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
|
||||
return errors.Wrap(err, "failed to parse gas limit")
|
||||
}
|
||||
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
|
||||
return errors.Wrap(err, "failed to parse timestamp")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var errInvalidUint256 = errors.New("invalid Uint256")
|
||||
var errDecodeUint256 = errors.New("unable to decode into Uint256")
|
||||
|
||||
@@ -555,6 +636,44 @@ type SignedBlindedBeaconBlockBellatrix struct {
|
||||
*eth.SignedBlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBellatrix is a field in SignedBlindedBeaconBlockBellatrix.
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBodyBellatrix is a field in BlindedBeaconBlockBellatrix.
|
||||
type BlindedBeaconBlockBodyBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBodyBellatrix
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockBellatrix.
|
||||
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockBellatrix `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{
|
||||
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
|
||||
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBellatrix.
|
||||
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||
StateRoot hexutil.Bytes `json:"state_root"`
|
||||
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: b.ParentRoot,
|
||||
StateRoot: b.StateRoot,
|
||||
Body: &BlindedBeaconBlockBodyBellatrix{b.BlindedBeaconBlockBellatrix.Body},
|
||||
})
|
||||
}
|
||||
|
||||
// ProposerSlashing is a field in BlindedBeaconBlockBodyCapella.
|
||||
type ProposerSlashing struct {
|
||||
*eth.ProposerSlashing
|
||||
@@ -809,6 +928,53 @@ func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyBellatrix.
|
||||
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
|
||||
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits[i]}
|
||||
}
|
||||
deps := make([]*Deposit, len(b.BlindedBeaconBlockBodyBellatrix.Deposits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Deposits {
|
||||
deps[i] = &Deposit{Deposit: b.BlindedBeaconBlockBodyBellatrix.Deposits[i]}
|
||||
}
|
||||
atts := make([]*Attestation, len(b.BlindedBeaconBlockBodyBellatrix.Attestations))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Attestations {
|
||||
atts[i] = &Attestation{Attestation: b.BlindedBeaconBlockBodyBellatrix.Attestations[i]}
|
||||
}
|
||||
atsl := make([]*AttesterSlashing, len(b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings {
|
||||
atsl[i] = &AttesterSlashing{AttesterSlashing: b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings[i]}
|
||||
}
|
||||
pros := make([]*ProposerSlashing, len(b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings {
|
||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti hexutil.Bytes `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
|
||||
Attestations []*Attestation `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
|
||||
}{
|
||||
RandaoReveal: b.RandaoReveal,
|
||||
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
|
||||
Graffiti: b.BlindedBeaconBlockBodyBellatrix.Graffiti,
|
||||
ProposerSlashings: pros,
|
||||
AttesterSlashings: atsl,
|
||||
Attestations: atts,
|
||||
Deposits: deps,
|
||||
VoluntaryExits: sve,
|
||||
SyncAggregate: &SyncAggregate{b.BlindedBeaconBlockBodyBellatrix.SyncAggregate},
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBLSToExecutionChange is a field in Beacon Block Body for capella and above.
|
||||
type SignedBLSToExecutionChange struct {
|
||||
*eth.SignedBLSToExecutionChange
|
||||
@@ -843,6 +1009,102 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBlindedBeaconBlockCapella is part of the request object sent to builder API /eth/v1/builder/blinded_blocks for Capella.
|
||||
type SignedBlindedBeaconBlockCapella struct {
|
||||
*eth.SignedBlindedBeaconBlockCapella
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockCapella is a field in SignedBlindedBeaconBlockCapella.
|
||||
type BlindedBeaconBlockCapella struct {
|
||||
*eth.BlindedBeaconBlockCapella
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBodyCapella is a field in BlindedBeaconBlockCapella.
|
||||
type BlindedBeaconBlockBodyCapella struct {
|
||||
*eth.BlindedBeaconBlockBodyCapella
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockCapella.
|
||||
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockCapella `json:"message"`
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
}{
|
||||
Message: &BlindedBeaconBlockCapella{b.Block},
|
||||
Signature: b.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockCapella
|
||||
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||
StateRoot hexutil.Bytes `json:"state_root"`
|
||||
Body *BlindedBeaconBlockBodyCapella `json:"body"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: b.ParentRoot,
|
||||
StateRoot: b.StateRoot,
|
||||
Body: &BlindedBeaconBlockBodyCapella{b.Body},
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyCapella
|
||||
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
|
||||
for i := range b.VoluntaryExits {
|
||||
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.VoluntaryExits[i]}
|
||||
}
|
||||
deps := make([]*Deposit, len(b.Deposits))
|
||||
for i := range b.Deposits {
|
||||
deps[i] = &Deposit{Deposit: b.Deposits[i]}
|
||||
}
|
||||
atts := make([]*Attestation, len(b.Attestations))
|
||||
for i := range b.Attestations {
|
||||
atts[i] = &Attestation{Attestation: b.Attestations[i]}
|
||||
}
|
||||
atsl := make([]*AttesterSlashing, len(b.AttesterSlashings))
|
||||
for i := range b.AttesterSlashings {
|
||||
atsl[i] = &AttesterSlashing{AttesterSlashing: b.AttesterSlashings[i]}
|
||||
}
|
||||
pros := make([]*ProposerSlashing, len(b.ProposerSlashings))
|
||||
for i := range b.ProposerSlashings {
|
||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.ProposerSlashings[i]}
|
||||
}
|
||||
chs := make([]*SignedBLSToExecutionChange, len(b.BlsToExecutionChanges))
|
||||
for i := range b.BlsToExecutionChanges {
|
||||
chs[i] = &SignedBLSToExecutionChange{SignedBLSToExecutionChange: b.BlsToExecutionChanges[i]}
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti hexutil.Bytes `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
|
||||
Attestations []*Attestation `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderCapella `json:"execution_payload_header"`
|
||||
}{
|
||||
RandaoReveal: b.RandaoReveal,
|
||||
Eth1Data: &Eth1Data{b.Eth1Data},
|
||||
Graffiti: b.Graffiti,
|
||||
ProposerSlashings: pros,
|
||||
AttesterSlashings: atsl,
|
||||
Attestations: atts,
|
||||
Deposits: deps,
|
||||
VoluntaryExits: sve,
|
||||
BLSToExecutionChanges: chs,
|
||||
SyncAggregate: &SyncAggregate{b.SyncAggregate},
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: b.ExecutionPayloadHeader},
|
||||
})
|
||||
}
|
||||
|
||||
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
|
||||
type ExecHeaderResponseDeneb struct {
|
||||
Data struct {
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
@@ -38,8 +38,7 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
a, err := shared.SignedValidatorRegistrationFromConsensus(svr)
|
||||
require.NoError(t, err)
|
||||
a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
|
||||
je, err := json.Marshal(a)
|
||||
require.NoError(t, err)
|
||||
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
|
||||
@@ -56,11 +55,11 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
|
||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
|
||||
|
||||
t.Run("roundtrip", func(t *testing.T) {
|
||||
b := &shared.SignedValidatorRegistration{}
|
||||
b := &SignedValidatorRegistration{}
|
||||
if err := json.Unmarshal(je, b); err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepEqual(t, a, b)
|
||||
require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1530,7 +1529,7 @@ func TestUint256UnmarshalTooBig(t *testing.T) {
|
||||
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
expected, err := os.ReadFile("testdata/blinded-block.json")
|
||||
require.NoError(t, err)
|
||||
b, err := shared.BlindedBeaconBlockBellatrixFromConsensus(ð.BlindedBeaconBlockBellatrix{
|
||||
b := &BlindedBeaconBlockBellatrix{BlindedBeaconBlockBellatrix: ð.BlindedBeaconBlockBellatrix{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -1547,8 +1546,7 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
SyncAggregate: pbSyncAggregate(),
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}}
|
||||
m, err := json.Marshal(b)
|
||||
require.NoError(t, err)
|
||||
// string error output is easier to deal with
|
||||
@@ -1560,7 +1558,7 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
|
||||
expected, err := os.ReadFile("testdata/blinded-block-capella.json")
|
||||
require.NoError(t, err)
|
||||
b, err := shared.BlindedBeaconBlockCapellaFromConsensus(ð.BlindedBeaconBlockCapella{
|
||||
b := &BlindedBeaconBlockCapella{BlindedBeaconBlockCapella: ð.BlindedBeaconBlockCapella{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -1577,8 +1575,7 @@ func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
|
||||
SyncAggregate: pbSyncAggregate(),
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeaderCapella(t),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}}
|
||||
m, err := json.Marshal(b)
|
||||
require.NoError(t, err)
|
||||
// string error output is easier to deal with
|
||||
|
||||
@@ -7,7 +7,6 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//validator/rpc:go_default_library",
|
||||
"//validator/rpc/apimiddleware:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v4/validator/rpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
|
||||
)
|
||||
|
||||
@@ -42,7 +41,7 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Data) == 0 {
|
||||
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
|
||||
return nil, errors.New("there are no local keys or remote keys on the validator")
|
||||
}
|
||||
|
||||
@@ -51,8 +50,8 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
|
||||
for index := range jsonlocal.Keystores {
|
||||
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
|
||||
}
|
||||
for index := range jsonremote.Data {
|
||||
hexKeys[jsonremote.Data[index].Pubkey] = true
|
||||
for index := range jsonremote.Keystores {
|
||||
hexKeys[jsonremote.Keystores[index].Pubkey] = true
|
||||
}
|
||||
keys := make([]string, 0)
|
||||
for k := range hexKeys {
|
||||
@@ -75,14 +74,14 @@ func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.List
|
||||
}
|
||||
|
||||
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
|
||||
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*rpc.ListRemoteKeysResponse, error) {
|
||||
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
|
||||
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
jsonremote := &rpc.ListRemoteKeysResponse{}
|
||||
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
|
||||
if len(remoteBytes) != 0 {
|
||||
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse remote keystore list")
|
||||
@@ -108,13 +107,13 @@ func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []stri
|
||||
}
|
||||
|
||||
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
|
||||
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*rpc.GetFeeRecipientByPubkeyResponse, error) {
|
||||
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
|
||||
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
|
||||
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
feejson := &rpc.GetFeeRecipientByPubkeyResponse{}
|
||||
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
|
||||
if err := json.Unmarshal(b, feejson); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse fee recipient")
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ const (
|
||||
VersionHeader = "Eth-Consensus-Version"
|
||||
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
|
||||
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
|
||||
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
|
||||
JsonMediaType = "application/json"
|
||||
OctetStreamMediaType = "application/octet-stream"
|
||||
)
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/api/server",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"middleware_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
NormalizeQueryValues(query)
|
||||
r.URL.RawQuery = query.Encode()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestNormalizeQueryValuesHandler(t *testing.T) {
|
||||
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("next handler"))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
handler := NormalizeQueryValuesHandler(nextHandler)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputQuery string
|
||||
expectedQuery string
|
||||
}{
|
||||
{
|
||||
name: "3 values",
|
||||
inputQuery: "key=value1,value2,value3",
|
||||
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
if req.URL.RawQuery != test.expectedQuery {
|
||||
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
|
||||
}
|
||||
|
||||
if rr.Body.String() != "next handler" {
|
||||
t.Errorf("next handler was not executed")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
19
beacon-chain/BUILD.bazel
Normal file
19
beacon-chain/BUILD.bazel
Normal file
@@ -0,0 +1,19 @@
|
||||
load("//tools:target_migration.bzl", "moved_targets")
|
||||
|
||||
moved_targets(
|
||||
[
|
||||
":push_images_debug",
|
||||
":push_images_alpine",
|
||||
":push_images",
|
||||
":image_bundle_debug",
|
||||
":image_debug",
|
||||
":image_bundle_alpine",
|
||||
":image_bundle",
|
||||
":image_with_creation_time",
|
||||
":image_alpine",
|
||||
":image",
|
||||
":go_default_test",
|
||||
":beacon-chain",
|
||||
],
|
||||
"//cmd/beacon-chain",
|
||||
)
|
||||
@@ -12,7 +12,6 @@ go_library(
|
||||
"head.go",
|
||||
"head_sync_committee_info.go",
|
||||
"init_sync_process_block.go",
|
||||
"lightclient.go",
|
||||
"log.go",
|
||||
"merge_ascii_art.go",
|
||||
"metrics.go",
|
||||
@@ -78,8 +77,6 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
|
||||
@@ -70,8 +70,11 @@ func IsInvalidBlock(e error) bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
var d invalidBlockError
|
||||
return errors.As(e, &d)
|
||||
_, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return IsInvalidBlock(errors.Unwrap(e))
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
|
||||
@@ -80,8 +83,7 @@ func InvalidBlockLVH(e error) [32]byte {
|
||||
if e == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
var d invalidBlockError
|
||||
ok := errors.As(e, &d)
|
||||
d, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return [32]byte{}
|
||||
}
|
||||
@@ -94,8 +96,7 @@ func InvalidBlockRoot(e error) [32]byte {
|
||||
if e == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
var d invalidBlockError
|
||||
ok := errors.As(e, &d)
|
||||
d, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return [32]byte{}
|
||||
}
|
||||
@@ -107,8 +108,7 @@ func InvalidAncestorRoots(e error) [][32]byte {
|
||||
if e == nil {
|
||||
return [][32]byte{}
|
||||
}
|
||||
var d invalidBlockError
|
||||
ok := errors.As(e, &d)
|
||||
d, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return [][32]byte{}
|
||||
}
|
||||
|
||||
@@ -24,9 +24,6 @@ func TestInvalidBlockRoot(t *testing.T) {
|
||||
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
|
||||
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
|
||||
|
||||
newErr := errors.Wrap(err, "wrap me")
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
|
||||
}
|
||||
|
||||
func TestInvalidRoots(t *testing.T) {
|
||||
@@ -36,9 +33,4 @@ func TestInvalidRoots(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
|
||||
require.DeepEqual(t, roots, InvalidAncestorRoots(err))
|
||||
|
||||
newErr := errors.Wrap(err, "wrap me")
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
|
||||
require.DeepEqual(t, roots, InvalidAncestorRoots(newErr))
|
||||
}
|
||||
|
||||
@@ -157,11 +157,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
if hasAttr && payloadID != nil {
|
||||
var pId [8]byte
|
||||
copy(pId[:], payloadID[:])
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
|
||||
"headSlot": headBlk.Slot(),
|
||||
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
|
||||
}).Info("Forkchoice updated with payload attributes for proposal")
|
||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
|
||||
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -395,7 +390,7 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
|
||||
return err
|
||||
}
|
||||
// No op if the sidecar does not exist.
|
||||
if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil {
|
||||
if err := s.cfg.BeaconDB.DeleteBlobSidecar(ctx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,245 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
const (
|
||||
finalityBranchNumOfLeaves = 6
|
||||
)
|
||||
|
||||
// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
|
||||
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
|
||||
//
|
||||
// return LightClientFinalityUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// finalized_header=update.finalized_header,
|
||||
// finality_branch=update.finality_branch,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func CreateLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
|
||||
return ðpbv2.LightClientFinalityUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
|
||||
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
|
||||
//
|
||||
// return LightClientOptimisticUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func CreateLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
|
||||
return ðpbv2.LightClientOptimisticUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
|
||||
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
|
||||
attestedEpoch := slots.ToEpoch(attestedState.Slot())
|
||||
if attestedEpoch < params.BeaconConfig().AltairForkEpoch {
|
||||
return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
|
||||
}
|
||||
|
||||
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
syncAggregate, err := block.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get sync aggregate %v", err)
|
||||
}
|
||||
|
||||
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
|
||||
}
|
||||
|
||||
// assert state.slot == state.latest_block_header.slot
|
||||
if state.Slot() != state.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), state.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// assert hash_tree_root(header) == hash_tree_root(block.message)
|
||||
header := state.LatestBlockHeader()
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get state root %v", err)
|
||||
}
|
||||
header.StateRoot = stateRoot[:]
|
||||
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get header root %v", err)
|
||||
}
|
||||
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block root %v", err)
|
||||
}
|
||||
|
||||
if headerRoot != blockRoot {
|
||||
return nil, fmt.Errorf("header root %#x not equal to block root %#x", headerRoot, blockRoot)
|
||||
}
|
||||
|
||||
// assert attested_state.slot == attested_state.latest_block_header.slot
|
||||
if attestedState.Slot() != attestedState.LatestBlockHeader().Slot {
|
||||
return nil, fmt.Errorf("attested state slot %d not equal to attested latest block header slot %d", attestedState.Slot(), attestedState.LatestBlockHeader().Slot)
|
||||
}
|
||||
|
||||
// attested_header = attested_state.latest_block_header.copy()
|
||||
attestedHeader := attestedState.LatestBlockHeader()
|
||||
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested state root %v", err)
|
||||
}
|
||||
attestedHeader.StateRoot = attestedStateRoot[:]
|
||||
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get attested header root %v", err)
|
||||
}
|
||||
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
|
||||
}
|
||||
|
||||
// Return result
|
||||
attestedHeaderResult := ðpbv1.BeaconBlockHeader{
|
||||
Slot: attestedHeader.Slot,
|
||||
ProposerIndex: attestedHeader.ProposerIndex,
|
||||
ParentRoot: attestedHeader.ParentRoot,
|
||||
StateRoot: attestedHeader.StateRoot,
|
||||
BodyRoot: attestedHeader.BodyRoot,
|
||||
}
|
||||
|
||||
syncAggregateResult := ðpbv1.SyncAggregate{
|
||||
SyncCommitteeBits: syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: syncAggregate.SyncCommitteeSignature,
|
||||
}
|
||||
|
||||
result := ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: attestedHeaderResult,
|
||||
SyncAggregate: syncAggregateResult,
|
||||
SignatureSlot: block.Block().Slot(),
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientFinalityUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
|
||||
result, err := NewLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx,
|
||||
state,
|
||||
block,
|
||||
attestedState,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Indicate finality whenever possible
|
||||
var finalizedHeader *ethpbv1.BeaconBlockHeader
|
||||
var finalityBranch [][]byte
|
||||
|
||||
if finalizedBlock != nil && !finalizedBlock.IsNil() {
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
tempFinalizedHeader, err := finalizedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header %v", err)
|
||||
}
|
||||
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
|
||||
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get finalized header root %v", err)
|
||||
}
|
||||
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
return nil, fmt.Errorf("finalized header root %#x not equal to attested finalized checkpoint root %#x", finalizedHeaderRoot, bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root))
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
|
||||
return nil, fmt.Errorf("invalid finalized header root %v", attestedState.FinalizedCheckpoint().Root)
|
||||
}
|
||||
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
var bErr error
|
||||
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
|
||||
if bErr != nil {
|
||||
return nil, fmt.Errorf("could not get finalized root proof %v", bErr)
|
||||
}
|
||||
} else {
|
||||
finalizedHeader = ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
|
||||
finalityBranch = make([][]byte, finalityBranchNumOfLeaves)
|
||||
for i := 0; i < finalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch[i] = make([]byte, 32)
|
||||
}
|
||||
}
|
||||
|
||||
result.FinalizedHeader = finalizedHeader
|
||||
result.FinalityBranch = finalityBranch
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromFinalityUpdate(update *ethpbv2.LightClientFinalityUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
FinalizedHeader: update.FinalizedHeader,
|
||||
FinalityBranch: update.FinalityBranch,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLightClientUpdateFromOptimisticUpdate(update *ethpbv2.LightClientOptimisticUpdate) *ethpbv2.LightClientUpdate {
|
||||
return ðpbv2.LightClientUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
SyncAggregate: update.SyncAggregate,
|
||||
SignatureSlot: update.SignatureSlot,
|
||||
}
|
||||
}
|
@@ -1,160 +0,0 @@
package blockchain

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

type testlc struct {
	t              *testing.T
	ctx            context.Context
	state          state.BeaconState
	block          interfaces.ReadOnlySignedBeaconBlock
	attestedState  state.BeaconState
	attestedHeader *ethpb.BeaconBlockHeader
}

func newTestLc(t *testing.T) *testlc {
	return &testlc{t: t}
}

func (l *testlc) setupTest() *testlc {
	ctx := context.Background()

	slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)

	attestedState, err := util.NewBeaconStateCapella()
	require.NoError(l.t, err)
	err = attestedState.SetSlot(slot)
	require.NoError(l.t, err)

	parent := util.NewBeaconBlockCapella()
	parent.Block.Slot = slot

	signedParent, err := blocks.NewSignedBeaconBlock(parent)
	require.NoError(l.t, err)

	parentHeader, err := signedParent.Header()
	require.NoError(l.t, err)
	attestedHeader := parentHeader.Header

	err = attestedState.SetLatestBlockHeader(attestedHeader)
	require.NoError(l.t, err)
	attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
	require.NoError(l.t, err)

	// get a new signed block so the root is updated with the new state root
	parent.Block.StateRoot = attestedStateRoot[:]
	signedParent, err = blocks.NewSignedBeaconBlock(parent)
	require.NoError(l.t, err)

	state, err := util.NewBeaconStateCapella()
	require.NoError(l.t, err)
	err = state.SetSlot(slot)
	require.NoError(l.t, err)

	parentRoot, err := signedParent.Block().HashTreeRoot()
	require.NoError(l.t, err)

	block := util.NewBeaconBlockCapella()
	block.Block.Slot = slot
	block.Block.ParentRoot = parentRoot[:]

	for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
		block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
	}

	signedBlock, err := blocks.NewSignedBeaconBlock(block)
	require.NoError(l.t, err)

	h, err := signedBlock.Header()
	require.NoError(l.t, err)

	err = state.SetLatestBlockHeader(h.Header)
	require.NoError(l.t, err)
	stateRoot, err := state.HashTreeRoot(ctx)
	require.NoError(l.t, err)

	// get a new signed block so the root is updated with the new state root
	block.Block.StateRoot = stateRoot[:]
	signedBlock, err = blocks.NewSignedBeaconBlock(block)
	require.NoError(l.t, err)

	l.state = state
	l.attestedState = attestedState
	l.attestedHeader = attestedHeader
	l.block = signedBlock
	l.ctx = ctx

	return l
}

func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
	require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
	require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
	require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
	require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")

	attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
	require.NoError(l.t, err)
	require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
}

func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
	syncAggregate, err := l.block.Block().Body().SyncAggregate()
	require.NoError(l.t, err)
	require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
	require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
}

func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
	l := newTestLc(t).setupTest()

	update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
	require.NoError(t, err)
	require.NotNil(t, update, "update is nil")

	require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

	l.checkSyncAggregate(update)
	l.checkAttestedHeader(update)

	require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
	require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
}

func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
	l := newTestLc(t).setupTest()

	update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
	require.NoError(t, err)
	require.NotNil(t, update, "update is nil")

	require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

	l.checkSyncAggregate(update)
	l.checkAttestedHeader(update)

	zeroHash := params.BeaconConfig().ZeroHash[:]
	require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
	require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
	require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
	require.Equal(t, finalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
	for _, leaf := range update.FinalityBranch {
		require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
	}
}
@@ -16,7 +16,7 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, h.SetValidators(nil))
	err = reportEpochMetrics(context.Background(), s, h)
	require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
	require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
}

func TestReportEpochMetrics_BadAttestation(t *testing.T) {

@@ -564,10 +564,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
	if len(sidecars) >= expected {
		s.blobNotifiers.delete(root)
		if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
			log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
			if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
				log.WithError(err2).Error("could not delete sidecars")
			}
			return err
		}
		logBlobSidecar(sidecars, t)
@@ -598,10 +594,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
		return errors.Wrap(err, "could not get blob sidecars")
	}
	if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
		log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
		if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
			log.WithError(err2).Error("could not delete sidecars")
		}
		return err
	}
	logBlobSidecar(sidecars, t)
@@ -657,13 +649,11 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
		return
	}
	s.headLock.RUnlock()
	s.cfg.ForkChoiceStore.RLock()
	_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
		headState: headState,
		headRoot:  headRoot,
		headBlock: headBlock.Block(),
	})
	s.cfg.ForkChoiceStore.RUnlock()
	if err != nil {
		log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
	}

@@ -1103,15 +1103,12 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
	var wg sync.WaitGroup
	wg.Add(4)
	var lock sync.Mutex
	go func() {
		preState, err := service.getBlockPreState(ctx, wsb1.Block())
		require.NoError(t, err)
		postState, err := service.validateStateTransition(ctx, preState, wsb1)
		require.NoError(t, err)
		lock.Lock()
		require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
		lock.Unlock()
		wg.Done()
	}()
	go func() {
@@ -1119,9 +1116,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
		require.NoError(t, err)
		postState, err := service.validateStateTransition(ctx, preState, wsb2)
		require.NoError(t, err)
		lock.Lock()
		require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
		lock.Unlock()
		wg.Done()
	}()
	go func() {
@@ -1129,9 +1124,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
		require.NoError(t, err)
		postState, err := service.validateStateTransition(ctx, preState, wsb3)
		require.NoError(t, err)
		lock.Lock()
		require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
		lock.Unlock()
		wg.Done()
	}()
	go func() {
@@ -1139,9 +1132,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
		require.NoError(t, err)
		postState, err := service.validateStateTransition(ctx, preState, wsb4)
		require.NoError(t, err)
		lock.Lock()
		require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
		lock.Unlock()
		wg.Done()
	}()
	wg.Wait()

@@ -1,23 +1,7 @@
package blockchain

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
// for the block root `root` is ready in the database.
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
	s.blobNotifiers.notifyIndex(root, index)
}

// ReceiveBlob saves the blob to database and sends the new event
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.BlobSidecar) error {
	if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{b}); err != nil {
		return err
	}

	s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
	return nil
func (s *Service) SendNewBlobEvent(root [32]byte, index uint64) {
	s.blobNotifiers.forRoot(root) <- index
}
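A hedged sketch, not code from this diff, of how a consumer of the channel-based SendNewBlobEvent above might wait for blob availability. It assumes only the standard context package and the chan uint64 shape returned by blobNotifiers.forRoot in the hunks above; waitForBlobIndices is an illustrative name.

// waitForBlobIndices blocks until `expected` distinct sidecar indices have
// been announced on c for a block root, or the context is cancelled.
func waitForBlobIndices(ctx context.Context, c <-chan uint64, expected int) error {
	seen := make(map[uint64]struct{}, expected)
	for len(seen) < expected {
		select {
		case idx := <-c:
			seen[idx] = struct{}{} // duplicate announcements are harmless; the map dedupes
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}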
@@ -3,7 +3,6 @@ package blockchain
import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
@@ -14,6 +13,7 @@ import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -43,7 +43,7 @@ type BlockReceiver interface {
// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
	ReceiveBlob(context.Context, *ethpb.BlobSidecar) error
	SendNewBlobEvent([32]byte, uint64)
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -59,11 +59,6 @@ type SlashingReceiver interface {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
	defer span.End()
	// Return early if the block has been synced
	if s.InForkchoice(blockRoot) {
		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
		return nil
	}
	receivedTime := time.Now()
	s.blockBeingSynced.set(blockRoot)
	defer s.blockBeingSynced.unset(blockRoot)
@@ -106,7 +101,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	if err := eg.Wait(); err != nil {
		return err
	}
	if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
	slotCtx, cancel := context.WithTimeout(ctx, time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
	defer cancel()
	if err := s.isDataAvailable(slotCtx, blockRoot, blockCopy); err != nil {
		return errors.Wrap(err, "could not validate blob data availability")
	}
	// The rest of block processing takes a lock on forkchoice.

@@ -94,35 +94,6 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
	sync.RWMutex
	notifiers map[[32]byte]chan uint64
	seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
	if idx >= fieldparams.MaxBlobsPerBlock {
		return
	}

	bn.Lock()
	seen := bn.seenIndex[root]
	if seen[idx] {
		bn.Unlock()
		return
	}
	seen[idx] = true
	bn.seenIndex[root] = seen

	// Retrieve or create the notifier channel for the given root.
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
		bn.notifiers[root] = c
	}

	bn.Unlock()

	c <- idx
}

func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
@@ -139,7 +110,6 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
func (bn *blobNotifierMap) delete(root [32]byte) {
	bn.Lock()
	defer bn.Unlock()
	delete(bn.seenIndex, root)
	delete(bn.notifiers, root)
}

@@ -156,7 +126,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	bn := &blobNotifierMap{
		notifiers: make(map[[32]byte]chan uint64),
		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
	}
	srv := &Service{
		ctx: ctx,

@@ -25,7 +25,6 @@ import (
	state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -446,10 +445,11 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
	s := &Service{
		cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
	}
	blk := util.NewBeaconBlock()
	blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
	r, err := blk.Block.HashTreeRoot()
	require.NoError(b, err)
	beaconState, err := util.NewBeaconState()
	bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
	beaconState, err := state_native.InitializeFromProtoPhase0(bs)
	require.NoError(b, err)
	require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))

@@ -514,48 +514,3 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
	s.G = g
	return s.Err
}

func TestNotifyIndex(t *testing.T) {
	// Initialize a blobNotifierMap
	bn := &blobNotifierMap{
		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
		notifiers: make(map[[32]byte]chan uint64),
	}

	// Sample root and index
	var root [32]byte
	copy(root[:], "exampleRoot")

	// Test notifying a new index
	bn.notifyIndex(root, 1)
	if !bn.seenIndex[root][1] {
		t.Errorf("Index was not marked as seen")
	}

	// Test that a new channel is created
	if _, ok := bn.notifiers[root]; !ok {
		t.Errorf("Notifier channel was not created")
	}

	// Test notifying an already seen index
	bn.notifyIndex(root, 1)
	if len(bn.notifiers[root]) > 1 {
		t.Errorf("Notifier channel should not receive multiple messages for the same index")
	}

	// Test notifying a new index again
	bn.notifyIndex(root, 2)
	if !bn.seenIndex[root][2] {
		t.Errorf("Index was not marked as seen")
	}

	// Test that the notifier channel receives the index
	select {
	case idx := <-bn.notifiers[root]:
		if idx != 1 {
			t.Errorf("Received index on channel is incorrect")
		}
	default:
		t.Errorf("Notifier channel did not receive the index")
	}
}

@@ -2,7 +2,6 @@ package blockchain

import (
	"context"
	"sync"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/async/event"
@@ -24,13 +23,10 @@ import (

type mockBeaconNode struct {
	stateFeed *event.Feed
	mu        sync.Mutex
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
	mbn.mu.Lock()
	defer mbn.mu.Unlock()
	if mbn.stateFeed == nil {
		mbn.stateFeed = new(event.Feed)
	}

@@ -72,7 +72,6 @@ type ChainService struct {
	OptimisticRoots map[[32]byte]bool
	BlockSlot       primitives.Slot
	SyncingRoot     [32]byte
	Blobs           []*ethpb.BlobSidecar
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -607,13 +606,10 @@ func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
	return [32]byte{}
}

// SendNewBlobEvent mocks the same method in the chain service
func (*ChainService) SendNewBlobEvent(_ [32]byte, _ uint64) {}

// BlockBeingSynced mocks the same method in the chain service
func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
	return root == c.SyncingRoot
}

// ReceiveBlob implements the same method in the chain service
func (c *ChainService) ReceiveBlob(_ context.Context, b *ethpb.BlobSidecar) error {
	c.Blobs = append(c.Blobs, b)
	return nil
}
4 beacon-chain/cache/checkpoint_state.go vendored
@@ -42,7 +42,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {
// StateByCheckpoint fetches state by checkpoint. Returns true with a
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
	h, err := hash.Proto(cp)
	h, err := hash.HashProto(cp)
	if err != nil {
		return nil, err
	}
@@ -62,7 +62,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has reached the max cache size limit.
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
	h, err := hash.Proto(cp)
	h, err := hash.HashProto(cp)
	if err != nil {
		return err
	}

@@ -98,7 +98,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
		return
	}

	depRoot, err := hash.Proto(d)
	depRoot, err := hash.HashProto(d)
	if err != nil {
		log.WithError(err).Error("Could not remove deposit")
		return
@@ -109,7 +109,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

	idx := -1
	for i, ctnr := range dc.pendingDeposits {
		h, err := hash.Proto(ctnr.Deposit)
		h, err := hash.HashProto(ctnr.Deposit)
		if err != nil {
			log.WithError(err).Error("Could not hash deposit")
			continue

@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
		}
	}

	var ctrs []*ethpb.DepositContainer
	ctrs := []*ethpb.DepositContainer{}
	for i := 0; i < 2000; i++ {
		ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
	}

@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
			valIdx:    2,
			st:        genState(1),
			want:      0,
			errString: "validator index 2 does not exist",
			errString: "index 2 out of range",
		},
		{
			name: "active balance is 32eth",
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
			valIdx:        2,
			activeBalance: 1,
			want:          0,
			errString:     "validator index 2 does not exist",
			errString:     "index 2 out of range",
		},
		{
			name: "active balance is 1",

@@ -28,10 +28,10 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
		}
	}
	st, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
		Validators:  validators,
		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
	})
	require.NoError(t, err)
	require.NoError(t, st.SetValidators(validators))
	return st
}

@@ -69,15 +69,6 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
			},
			wantErr: false,
		},
		{
			name: "no active validators, epoch 100",
			args: args{
				state: getState(t, 0), // Regression test for divide by zero. Issue #13051.
				epoch: 100,
			},
			wantErr:   true,
			errString: "no active validator indices",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {

@@ -42,6 +42,7 @@ go_library(
		"//math:go_default_library",
		"//network/forks:go_default_library",
		"//proto/engine/v1:go_default_library",
		"//proto/eth/v2:go_default_library",
		"//proto/prysm/v1alpha1:go_default_library",
		"//proto/prysm/v1alpha1/attestation:go_default_library",
		"//proto/prysm/v1alpha1/slashings:go_default_library",
@@ -99,6 +100,7 @@ go_test(
		"//encoding/bytesutil:go_default_library",
		"//encoding/ssz:go_default_library",
		"//proto/engine/v1:go_default_library",
		"//proto/migration:go_default_library",
		"//proto/prysm/v1alpha1:go_default_library",
		"//proto/prysm/v1alpha1/attestation:go_default_library",
		"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
@@ -51,11 +51,7 @@ func ProcessVoluntaryExits(
|
||||
beaconState state.BeaconState,
|
||||
exits []*ethpb.SignedVoluntaryExit,
|
||||
) (state.BeaconState, error) {
|
||||
// Avoid calculating the epoch churn if no exits exist.
|
||||
if len(exits) == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
|
||||
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
|
||||
var exitEpoch primitives.Epoch
|
||||
for idx, exit := range exits {
|
||||
if exit == nil || exit.Exit == nil {
|
||||
|
||||
@@ -82,7 +82,7 @@ func ProcessRandaoNoVerify(
|
||||
for i, x := range blockRandaoReveal {
|
||||
latestMixSlice[i] ^= x
|
||||
}
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), [32]byte(latestMixSlice)); err != nil {
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), latestMixSlice); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
@@ -235,7 +236,7 @@ func BLSChangesSignatureBatch(
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
batch.PublicKeys[i] = publicKey
|
||||
htr, err := signing.Data(change.Message.HashTreeRoot, domain)
|
||||
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
|
||||
}
|
||||
@@ -250,7 +251,7 @@ func BLSChangesSignatureBatch(
|
||||
// is from a previous fork.
|
||||
func VerifyBLSChangeSignature(
|
||||
st state.ReadOnlyBeaconState,
|
||||
change *ethpb.SignedBLSToExecutionChange,
|
||||
change *ethpbv2.SignedBLSToExecutionChange,
|
||||
) error {
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -151,7 +152,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
})
|
||||
|
||||
t.Run("signature does not verify", func(t *testing.T) {
|
||||
@@ -1208,7 +1209,8 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
@@ -1272,7 +1274,8 @@ func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
require.Equal(t, false, verify)
|
||||
|
||||
// Verify a single change
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
@@ -1359,6 +1362,7 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
params.OverrideBeaconConfig(savedConfig)
|
||||
}
|
||||
|
||||
@@ -112,7 +112,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
if isActive && belowEjectionBalance {
|
||||
// Here is fine to do a quadratic loop since this should
|
||||
// barely happen
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(state)
|
||||
maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
|
||||
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
|
||||
@@ -137,10 +137,9 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
return nil, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
|
||||
churnLimit := helpers.ValidatorActivationChurnLimit(activeValidatorCount)
|
||||
|
||||
if state.Version() >= version.Deneb {
|
||||
churnLimit = helpers.ValidatorActivationChurnLimitDeneb(activeValidatorCount)
|
||||
churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
|
||||
// Prevent churn limit cause index out of bound.
|
||||
@@ -349,7 +348,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), [32]byte(mix)); err != nil {
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), mix); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -311,7 +311,8 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
limit := helpers.ValidatorActivationChurnLimit(0)
|
||||
limit, err := helpers.ValidatorChurnLimit(0)
|
||||
require.NoError(t, err)
|
||||
for i := uint64(0); i < limit+10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -337,42 +338,6 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
base := ðpb.BeaconStateDeneb{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.MinPerEpochChurnLimit = 10
|
||||
cfg.ChurnLimitQuotient = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
})
|
||||
}
|
||||
beaconState, err := state_native.InitializeFromProtoDeneb(base)
|
||||
require.NoError(t, err)
|
||||
currentEpoch := time.CurrentEpoch(beaconState)
|
||||
newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
for i, validator := range newState.Validators() {
|
||||
assert.Equal(t, currentEpoch+1, validator.ActivationEligibilityEpoch, "Could not update registry %d, unexpected activation eligibility epoch", i)
|
||||
// Note: In Deneb, only validators indices before `MaxPerEpochActivationChurnLimit` should be activated.
|
||||
if uint64(i) < params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
|
||||
t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
|
||||
i, helpers.ActivationExitEpoch(currentEpoch), validator.ActivationEpoch)
|
||||
}
|
||||
if uint64(i) >= params.BeaconConfig().MaxPerEpochActivationChurnLimit && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
t.Errorf("Could not update registry %d, validators should not have been activated, wanted activation epoch: %d, got %d",
|
||||
i, params.BeaconConfig().FarFutureEpoch, validator.ActivationEpoch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRegistryUpdates_ActivationCompletes(t *testing.T) {
|
||||
base := ðpb.BeaconState{
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
|
||||
@@ -99,7 +99,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.ErrorContains(t, "index 12390192 out of range", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.ErrorContains(t, "index 120391029 out of range", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.ErrorContains(t, "index 129301923 out of range", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
@@ -367,7 +367,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.ErrorContains(t, "index 21093019 out of range", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
|
||||
@@ -136,10 +136,6 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(indices) == 0 {
|
||||
return nil, errors.New("no active validator indices")
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
@@ -210,7 +206,10 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
return epoch + 1 + params.BeaconConfig().MaxSeedLookahead
|
||||
}
|
||||
|
||||
// calculateChurnLimit based on the formula in the spec.
|
||||
// ValidatorChurnLimit returns the number of validators that are allowed to
|
||||
// enter and exit validator pool for an epoch.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_validator_churn_limit(state: BeaconState) -> uint64:
|
||||
// """
|
||||
@@ -218,32 +217,12 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
// """
|
||||
// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||
// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
|
||||
func calculateChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
|
||||
churnLimit := activeValidatorCount / params.BeaconConfig().ChurnLimitQuotient
|
||||
if churnLimit < params.BeaconConfig().MinPerEpochChurnLimit {
|
||||
return params.BeaconConfig().MinPerEpochChurnLimit
|
||||
churnLimit = params.BeaconConfig().MinPerEpochChurnLimit
|
||||
}
|
||||
return churnLimit
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimit returns the maximum number of validators that can be activated in a slot.
|
||||
func ValidatorActivationChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorExitChurnLimit returns the maximum number of validators that can be exited in a slot.
|
||||
func ValidatorExitChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimitDeneb returns the maximum number of validators that can be activated in a slot post Deneb.
|
||||
func ValidatorActivationChurnLimitDeneb(activeValidatorCount uint64) uint64 {
|
||||
limit := calculateChurnLimit(activeValidatorCount)
|
||||
// New in Deneb.
|
||||
if limit > params.BeaconConfig().MaxPerEpochActivationChurnLimit {
|
||||
return params.BeaconConfig().MaxPerEpochActivationChurnLimit
|
||||
}
|
||||
return limit
|
||||
return churnLimit, nil
|
||||
}
|
||||
|
||||
// BeaconProposerIndex returns proposer index of a current slot.
|
||||
|
||||
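For intuition, and not as part of the diff: all of the churn helpers above reduce to the spec formula max(MIN_PER_EPOCH_CHURN_LIMIT, active_count // CHURN_LIMIT_QUOTIENT), with Deneb additionally capping activations at MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT. A standalone sketch follows; the hard-coded constants are mainnet-style assumptions, where the real code reads them from params.BeaconConfig().

// Illustrative constants; assumptions standing in for the beacon config.
const (
	minPerEpochChurnLimit           = 4
	churnLimitQuotient              = 65536
	maxPerEpochActivationChurnLimit = 8
)

// churnLimit mirrors get_validator_churn_limit from the spec pseudocode above.
func churnLimit(activeValidators uint64) uint64 {
	limit := activeValidators / churnLimitQuotient
	if limit < minPerEpochChurnLimit {
		return minPerEpochChurnLimit
	}
	return limit
}

// activationChurnLimitDeneb additionally applies the Deneb activation cap.
func activationChurnLimitDeneb(activeValidators uint64) uint64 {
	limit := churnLimit(activeValidators)
	if limit > maxPerEpochActivationChurnLimit {
		return maxPerEpochActivationChurnLimit
	}
	return limit
}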
@@ -367,50 +367,9 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
resultChurn := ValidatorActivationChurnLimit(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
tests := []struct {
|
||||
validatorCount int
|
||||
wantedChurn uint64
|
||||
}{
|
||||
{1000, 4},
|
||||
{100000, 4},
|
||||
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
// Create validators
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize beacon state
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
resultChurn, err := ValidatorChurnLimit(validatorCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get active validator count
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test churn limit calculation
|
||||
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorChurnLimit(%d)", test.validatorCount)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -560,24 +519,12 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
},
|
||||
want: []primitives.ValidatorIndex{0, 2, 3},
|
||||
},
|
||||
{
|
||||
name: "impossible_zero_validators", // Regression test for issue #13051
|
||||
args: args{
|
||||
state: ðpb.BeaconState{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
Validators: make([]*ethpb.Validator, 0),
|
||||
},
|
||||
epoch: 10,
|
||||
},
|
||||
wantedErr: "no active validator indices",
|
||||
},
|
||||
}
|
||||
defer ClearCache()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators(tt.args.state.Validators))
|
||||
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
|
||||
@@ -80,7 +80,10 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
|
||||
T := cfg.MaxEffectiveBalance / cfg.GweiPerEth
|
||||
|
||||
// Validator churn limit.
|
||||
delta := ValidatorExitChurnLimit(N)
|
||||
delta, err := ValidatorChurnLimit(N)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot obtain active validator churn limit: %w", err)
|
||||
}
|
||||
|
||||
// Balance top-ups.
|
||||
Delta := uint64(cfg.SlotsPerEpoch.Mul(cfg.MaxDeposits))
|
||||
|
||||
@@ -92,12 +92,12 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
|
||||
// domain=domain,
|
||||
// ))
|
||||
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
|
||||
return Data(object.HashTreeRoot, domain)
|
||||
return SigningData(object.HashTreeRoot, domain)
|
||||
}
|
||||
|
||||
// Data computes the signing data by utilising the provided root function and then
|
||||
// SigningData computes the signing data by utilising the provided root function and then
|
||||
// returning the signing data of the container object.
|
||||
func Data(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
objRoot, err := rootFunc()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
@@ -152,7 +152,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
root, err := Data(blkHdr.HashTreeRoot, domain)
|
||||
root, err := SigningData(blkHdr.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
@@ -191,7 +191,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
// utilize custom block hashing function
|
||||
root, err := Data(rootFunc, domain)
|
||||
root, err := SigningData(rootFunc, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
|
||||
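A hedged aside on the Data to SigningData rename above: the behaviour is unchanged. The signing root is hash_tree_root(SigningData{object_root, domain}), which for two 32-byte leaves reduces to a single SHA-256 over their concatenation. The sketch below re-derives that for intuition; it assumes only crypto/sha256 from the standard library and is not the function used by the diff.

// signingRoot returns sha256(objectRoot || domain), the SSZ hash tree root of
// a SigningData container whose two fields are both 32-byte values.
func signingRoot(objectRoot, domain [32]byte) [32]byte {
	var buf [64]byte
	copy(buf[:32], objectRoot[:])
	copy(buf[32:], domain[:])
	return sha256.Sum256(buf[:])
}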
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -222,18 +221,6 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
func EmptyGenesisState() (state.BeaconState, error) {
|
||||
blockRoots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, fieldparams.StateRootsLength)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
mixes := make([][]byte, fieldparams.RandaoMixesLength)
|
||||
for i := range mixes {
|
||||
mixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
st := ðpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
@@ -242,9 +229,6 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: mixes,
|
||||
// Validator registry fields.
|
||||
Validators: []*ethpb.Validator{},
|
||||
Balances: []uint64{},
|
||||
|
||||
@@ -103,8 +103,8 @@ func TestGenesisState_HashEquality(t *testing.T) {
|
||||
pbstate, err := state_native.ProtobufBeaconStatePhase0(state.ToProto())
|
||||
require.NoError(t, err)
|
||||
|
||||
root1, err1 := hash.Proto(pbState1)
|
||||
root2, err2 := hash.Proto(pbstate)
|
||||
root1, err1 := hash.HashProto(pbState1)
|
||||
root2, err2 := hash.HashProto(pbstate)
|
||||
|
||||
if err1 != nil || err2 != nil {
|
||||
t.Fatalf("Failed to marshal state to bytes: %v %v", err1, err2)
|
||||
|
||||
@@ -382,18 +382,10 @@ func TestProcessEpochPrecompute_CanProcess(t *testing.T) {
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
|
||||
newState, err := transition.ProcessEpochPrecompute(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
// an already exited validator
|
||||
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
|
||||
|
||||
// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// epoch and the number of them
|
||||
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
e := val.ExitEpoch()
|
||||
@@ -82,7 +82,10 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
currentChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
|
||||
if churn >= currentChurn {
|
||||
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
|
||||
@@ -134,7 +137,7 @@ func SlashValidator(
|
||||
slashedIdx primitives.ValidatorIndex,
|
||||
penaltyQuotient uint64,
|
||||
proposerRewardQuotient uint64) (state.BeaconState, error) {
|
||||
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
|
||||
maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
|
||||
@@ -232,7 +235,10 @@ func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validato
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
@@ -270,7 +276,10 @@ func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
|
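The exit-queue pattern repeated in the hunks above (InitiateValidatorExit, ExitedValidatorIndices, EjectedValidatorIndices) is: take the highest pending exit epoch, count how many validators already exit at it, and spill into the next epoch once that count reaches the churn limit. A hedged, condensed sketch follows; the names and plain uint64 epochs are simplifications, not the project's primitives.Epoch.

// nextExitEpoch is an illustrative condensation of the exit-queue logic above.
func nextExitEpoch(maxPendingExitEpoch, pendingAtThatEpoch, churnLimit, activationExitEpoch uint64) uint64 {
	exitQueueEpoch := maxPendingExitEpoch
	if activationExitEpoch > exitQueueEpoch {
		exitQueueEpoch = activationExitEpoch // never earlier than the minimum activation/exit delay
	}
	if pendingAtThatEpoch >= churnLimit {
		exitQueueEpoch++ // the queue is full for this epoch, push the exit to the next one
	}
	return exitQueueEpoch
}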
@@ -410,7 +410,7 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
	for _, tt := range tests {
		s, err := state_native.InitializeFromProtoPhase0(tt.state)
		require.NoError(t, err)
		epoch, churn := MaxExitEpochAndChurn(s)
		epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
		require.Equal(t, tt.wantedEpoch, epoch)
		require.Equal(t, tt.wantedChurn, churn)
	}

@@ -13,9 +13,9 @@ func NewDB(ctx context.Context, dirPath string) (Database, error) {
	return kv.NewKVStore(ctx, dirPath)
}

// NewFileName uses the KVStoreDatafilePath so that if this layer of
// NewDBFilename uses the KVStoreDatafilePath so that if this layer of
// indirection between db.NewDB->kv.NewKVStore ever changes, it will be easy to remember
// to also change this filename indirection at the same time.
func NewFileName(dirPath string) string {
	return kv.StoreDatafilePath(dirPath)
func NewDBFilename(dirPath string) string {
	return kv.KVStoreDatafilePath(dirPath)
}

@@ -96,7 +96,7 @@ type NoHeadAccessDatabase interface {

	// Blob operations.
	SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
	DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
	DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error

	CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}
@@ -170,7 +170,7 @@ type SlasherDatabase interface {
// Database interface with full access.
type Database interface {
	io.Closer
	backup.Exporter
	backup.BackupExporter
	HeadAccessDatabase

	DatabasePath() string

@@ -13,7 +13,6 @@ go_library(
		"error.go",
		"execution_chain.go",
		"finalized_block_roots.go",
		"flags.go",
		"genesis.go",
		"key.go",
		"kv.go",
@@ -39,7 +38,6 @@ go_library(
		"//beacon-chain/state:go_default_library",
		"//beacon-chain/state/genesis:go_default_library",
		"//beacon-chain/state/state-native:go_default_library",
		"//cmd/beacon-chain/flags:go_default_library",
		"//config/features:go_default_library",
		"//config/fieldparams:go_default_library",
		"//config/params:go_default_library",
@@ -67,7 +65,6 @@ go_library(
		"@com_github_prysmaticlabs_prombbolt//:go_default_library",
		"@com_github_schollz_progressbar_v3//:go_default_library",
		"@com_github_sirupsen_logrus//:go_default_library",
		"@com_github_urfave_cli_v2//:go_default_library",
		"@io_etcd_go_bbolt//:go_default_library",
		"@io_opencensus_go//trace:go_default_library",
		"@org_golang_google_protobuf//proto:go_default_library",
@@ -86,7 +83,6 @@ go_test(
		"encoding_test.go",
		"execution_chain_test.go",
		"finalized_block_roots_test.go",
		"flags_test.go",
		"genesis_test.go",
		"init_test.go",
		"kv_test.go",
@@ -107,7 +103,6 @@ go_test(
		"//beacon-chain/state:go_default_library",
		"//beacon-chain/state/genesis:go_default_library",
		"//beacon-chain/state/state-native:go_default_library",
		"//cmd/beacon-chain/flags:go_default_library",
		"//config/features:go_default_library",
		"//config/fieldparams:go_default_library",
		"//config/params:go_default_library",
@@ -124,8 +119,6 @@ go_test(
		"@com_github_ethereum_go_ethereum//common:go_default_library",
		"@com_github_golang_snappy//:go_default_library",
		"@com_github_pkg_errors//:go_default_library",
		"@com_github_sirupsen_logrus//:go_default_library",
		"@com_github_urfave_cli_v2//:go_default_library",
		"@io_bazel_rules_go//go/tools/bazel:go_default_library",
		"@io_etcd_go_bbolt//:go_default_library",
		"@org_golang_google_protobuf//proto:go_default_library",

@@ -48,7 +48,7 @@ func (rk blobRotatingKey) BlockRoot() []byte {

// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_EPOCHS_TO_PERSIST_BLOBS*SLOTS_PER_EPOCH
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
@@ -121,8 +121,7 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
	})
}

// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and no more than MAX_BLOB_EPOCHS.
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
	if len(scs) == 0 {
		return nil, errEmptySidecar
@@ -226,7 +225,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
}

// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
@@ -261,8 +260,8 @@ func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices
	return filterForIndices(sc, indices...)
}

// DeleteBlobSidecars returns true if the blobs are in the db.
func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error {
// DeleteBlobSidecar returns true if the blobs are in the db.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
	_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
@@ -290,6 +289,7 @@ func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {

func slotKey(slot types.Slot) []byte {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
	return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
}
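As a hedged illustration of the rotating-buffer key described by the SaveBlobSidecar comment and slotKey above: the key is the big-endian slot reduced modulo the retention window, followed by the raw slot and the block root. The sketch below uses only encoding/binary; the constants are assumptions standing in for the slots-per-epoch and MinEpochsForBlobsSidecarsRequest config values, and blobKey is an illustrative name, not a function in the diff.

// blobKey lays out bytes(slot % maxSlots) ++ bytes(slot) ++ blockRoot.
func blobKey(slot uint64, blockRoot [32]byte) []byte {
	const (
		slotsPerEpoch   = 32   // assumption: mainnet-style value
		retentionEpochs = 4096 // assumption: e.g. MinEpochsForBlobsSidecarsRequest
	)
	maxSlots := uint64(slotsPerEpoch * retentionEpochs)
	key := make([]byte, 0, 8+8+32)
	key = binary.BigEndian.AppendUint64(key, slot%maxSlots)
	key = binary.BigEndian.AppendUint64(key, slot)
	key = append(key, blockRoot[:]...)
	return key
}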
@@ -299,14 +299,14 @@ func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
|
||||
b := tx.Bucket(chainMetadataBucket)
|
||||
v := b.Get(blobRetentionEpochsKey)
|
||||
if v == nil {
|
||||
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(maxEpochsToPersistBlobs))); err != nil {
|
||||
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
e := bytesutil.BytesToUint64BigEndian(v)
|
||||
if e != uint64(maxEpochsToPersistBlobs) {
|
||||
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, maxEpochsToPersistBlobs)
|
||||
if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
|
||||
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
|
||||
@@ -3,13 +3,10 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -18,7 +15,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -91,7 +87,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
@@ -108,7 +104,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
@@ -127,7 +123,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
@@ -147,7 +143,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
@@ -165,7 +161,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
@@ -175,11 +171,11 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
require.NoError(t, db.DeleteBlobSidecars(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
require.Equal(t, 0, len(got))
|
||||
@@ -519,12 +515,9 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(42069, 10)))
|
||||
cliCtx := cli.NewContext(&cli.App{}, set, nil)
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
|
||||
nConfig := params.BeaconNetworkConfig()
|
||||
nConfig.MinEpochsForBlobsSidecarsRequest = 42069
|
||||
params.OverrideBeaconNetworkConfig(nConfig)
|
||||
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
|
||||
}
|
||||
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
|
||||
// ConfigureBlobRetentionEpoch sets the epoch count for blob retention based on the command-line context. It sets the local config `maxEpochsToPersistBlobs`.
|
||||
// If the flag is not set, the spec default `MinEpochsForBlobsSidecarsRequest` is used.
|
||||
// An error is returned if the input epoch is smaller than the spec default value.
|
||||
func ConfigureBlobRetentionEpoch(cliCtx *cli.Context) error {
|
||||
// Check if the blob retention epoch flag is set.
|
||||
if cliCtx.IsSet(flags.BlobRetentionEpoch.Name) {
|
||||
// Retrieve and cast the epoch value.
|
||||
epochValue := cliCtx.Uint64(flags.BlobRetentionEpoch.Name)
|
||||
e := primitives.Epoch(epochValue)
|
||||
|
||||
// Validate the epoch value against the spec default.
|
||||
if e < params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest {
|
||||
return fmt.Errorf("%s smaller than spec default, %d < %d", flags.BlobRetentionEpoch.Name, e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
}
|
||||
|
||||
maxEpochsToPersistBlobs = e
|
||||
}
|
||||
|
||||
return nil
|
||||
}
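For a concrete sense of the validation rule above: assuming the mainnet default MinEpochsForBlobsSidecarsRequest of 4096 epochs, passing --blob-retention-epochs=2048 is rejected with the "smaller than spec default" error, while --blob-retention-epochs=8192 is accepted and roughly doubles the retained window.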
|
||||
@@ -1,39 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func TestConfigureBlobRetentionEpoch(t *testing.T) {
|
||||
maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
params.SetupTestConfigCleanup(t)
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
|
||||
// Test case: Spec default.
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cli.NewContext(&app, set, nil)))
|
||||
require.Equal(t, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest, maxEpochsToPersistBlobs)
|
||||
|
||||
set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
|
||||
minEpochsForSidecarRequest := uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(2*minEpochsForSidecarRequest, 10)))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
// Test case: Input epoch is greater than or equal to spec value.
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
|
||||
require.Equal(t, primitives.Epoch(2*minEpochsForSidecarRequest), maxEpochsToPersistBlobs)
|
||||
|
||||
// Test case: Input epoch is less than spec value.
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(minEpochsForSidecarRequest-1, 10)))
|
||||
cliCtx = cli.NewContext(&app, set, nil)
|
||||
err := ConfigureBlobRetentionEpoch(cliCtx)
|
||||
require.ErrorContains(t, "blob-retention-epochs smaller than spec default", err)
|
||||
}
|
||||
@@ -1,12 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -15,9 +10,3 @@ func init() {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(io.Discard)
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@@ -92,10 +92,10 @@ type Store struct {
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// StoreDatafilePath is the canonical construction of a full
|
||||
// KVStoreDatafilePath is the canonical construction of a full
|
||||
// database file path from the directory path, so that code outside
|
||||
// this package can find the full path in a consistent way.
|
||||
func StoreDatafilePath(dirPath string) string {
|
||||
func KVStoreDatafilePath(dirPath string) string {
|
||||
return path.Join(dirPath, DatabaseFileName)
|
||||
}
|
||||
|
||||
@@ -146,7 +146,7 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
datafile := StoreDatafilePath(dirPath)
|
||||
datafile := KVStoreDatafilePath(dirPath)
|
||||
log.Infof("Opening Bolt DB at %s", datafile)
|
||||
boltDB, err := bolt.Open(
|
||||
datafile,
|
||||
|
||||
@@ -22,7 +22,7 @@ func Restore(cliCtx *cli.Context) error {
|
||||
targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)
|
||||
|
||||
restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
|
||||
if file.Exists(path.Join(restoreDir, kv.DatabaseFileName)) {
|
||||
if file.FileExists(path.Join(restoreDir, kv.DatabaseFileName)) {
|
||||
resp, err := prompt.ValidatePrompt(
|
||||
os.Stdin, dbExistsYesNoPrompt, prompt.ValidateYesOrNo,
|
||||
)
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"block_cache.go",
|
||||
"block_reader.go",
|
||||
"check_transition_config.go",
|
||||
"deposit.go",
|
||||
"engine_client.go",
|
||||
"errors.go",
|
||||
@@ -81,6 +82,7 @@ go_test(
|
||||
srcs = [
|
||||
"block_cache_test.go",
|
||||
"block_reader_test.go",
|
||||
"check_transition_config_test.go",
|
||||
"deposit_test.go",
|
||||
"engine_client_fuzz_test.go",
|
||||
"engine_client_test.go",
|
||||
@@ -94,6 +96,7 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
@@ -137,5 +140,6 @@ go_test(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -48,8 +48,6 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
testAcc.Backend.Commit()
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
exitRoutine := make(chan bool)
|
||||
|
||||
go func() {
|
||||
@@ -60,6 +58,8 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
header, err := web3Service.HeaderByNumber(web3Service.ctx, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
tickerChan <- time.Now()
|
||||
web3Service.cancel()
|
||||
exitRoutine <- true
|
||||
|
||||
beacon-chain/execution/check_transition_config.go (new file, 168 lines)
@@ -0,0 +1,168 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
checkTransitionPollingInterval = time.Second * 10
|
||||
logTtdInterval = time.Minute
|
||||
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
|
||||
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
|
||||
"your node will not be able to complete the proof-of-stake transition"
|
||||
needsEnginePortLog = "Could not check execution client configuration. " +
|
||||
"You are probably connecting to your execution client on the wrong port. For the Ethereum " +
|
||||
"merge, you will need to connect to your " +
|
||||
"execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
|
||||
"authenticated if connecting via HTTP. See our documentation on how to set up this up here " +
|
||||
"https://docs.prylabs.network/docs/execution-node/authentication"
|
||||
)
|
||||
|
||||
// Checks the transition configuration between Prysm and the connected execution node to ensure
|
||||
// there are no differences in terminal block difficulty and block hash.
|
||||
// If there are any discrepancies, we must log errors to ensure users can resolve
|
||||
// the problem and be ready for the merge transition.
|
||||
func (s *Service) checkTransitionConfiguration(
|
||||
ctx context.Context, blockNotifications chan *feed.Event,
|
||||
) {
|
||||
// If Bellatrix fork epoch is not set, we do not run this check.
|
||||
if params.BeaconConfig().BellatrixForkEpoch == math.MaxUint64 {
|
||||
return
|
||||
}
|
||||
i := new(big.Int)
|
||||
i.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
ttd := new(uint256.Int)
|
||||
ttd.SetFromBig(i)
|
||||
cfg := &pb.TransitionConfiguration{
|
||||
TerminalTotalDifficulty: ttd.Hex(),
|
||||
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
|
||||
TerminalBlockNumber: big.NewInt(0).Bytes(), // A value of 0 is recommended in the request.
|
||||
}
|
||||
err := s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, ErrConfigMismatch):
|
||||
log.WithError(err).Fatal(configMismatchLog)
|
||||
case errors.Is(err, ErrMethodNotFound):
|
||||
log.WithError(err).Error(needsEnginePortLog)
|
||||
default:
|
||||
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
|
||||
}
|
||||
}
|
||||
|
||||
// We poll the execution client to see if the transition configuration has changed.
|
||||
// This serves as a heartbeat to ensure the execution client and Prysm are ready for the
|
||||
// Bellatrix hard-fork transition.
|
||||
ticker := time.NewTicker(checkTransitionPollingInterval)
|
||||
logTtdTicker := time.NewTicker(logTtdInterval)
|
||||
hasTtdReached := false
|
||||
defer ticker.Stop()
|
||||
defer logTtdTicker.Stop()
|
||||
sub := s.cfg.stateNotifier.StateFeed().Subscribe(blockNotifications)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-sub.Err():
|
||||
return
|
||||
case ev := <-blockNotifications:
|
||||
data, ok := ev.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
isExecutionBlock, err := blocks.IsExecutionBlock(data.SignedBlock.Block().Body())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not check whether signed block is execution block")
|
||||
continue
|
||||
}
|
||||
if isExecutionBlock {
|
||||
log.Debug("PoS transition is complete, no longer checking for configuration changes")
|
||||
return
|
||||
}
|
||||
case tm := <-ticker.C:
|
||||
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
|
||||
err = s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
s.handleExchangeConfigurationError(err)
|
||||
cancel()
|
||||
case <-logTtdTicker.C:
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.chainStartData.GetGenesisTime()))
|
||||
if currentEpoch >= params.BeaconConfig().BellatrixForkEpoch && !hasTtdReached {
|
||||
hasTtdReached, err = s.logTtdStatus(ctx, ttd)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not log ttd status")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
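The per-tick deadline in the polling branch above follows a general Go pattern; a hypothetical, stripped-down helper (names are illustrative, reusing this package's logger) looks like this:

func pollWithDeadline(ctx context.Context, ticker *time.Ticker, timeout time.Duration, call func(context.Context) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case tm := <-ticker.C:
			// Bound each RPC by its own deadline so one hung call cannot stall the loop.
			cctx, cancel := context.WithDeadline(ctx, tm.Add(timeout))
			if err := call(cctx); err != nil {
				log.WithError(err).Error("Poll failed")
			}
			cancel() // release the deadline's timer promptly
		}
	}
}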
|
||||
|
||||
// We check if there is a configuration mismatch error between the execution client
|
||||
// and the Prysm beacon node. If so, we need to log errors in the node as it cannot successfully
|
||||
// complete the merge transition for the Bellatrix hard fork.
|
||||
func (s *Service) handleExchangeConfigurationError(err error) {
|
||||
if err == nil {
|
||||
// If there is no error from the exchange configuration call, we clear
|
||||
// the run error of the service if we had previously set it to ErrConfigMismatch.
|
||||
if errors.Is(s.runError, ErrConfigMismatch) {
|
||||
s.runError = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
// If the error is a configuration mismatch, we set a runtime error in the service.
|
||||
if errors.Is(err, ErrConfigMismatch) {
|
||||
s.runError = err
|
||||
log.WithError(err).Error(configMismatchLog)
|
||||
return
|
||||
} else if errors.Is(err, ErrMethodNotFound) {
|
||||
log.WithError(err).Error(needsEnginePortLog)
|
||||
return
|
||||
}
|
||||
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
|
||||
}
|
||||
|
||||
// Logs the terminal total difficulty status.
|
||||
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
|
||||
latest, err := s.LatestExecutionBlock(ctx)
|
||||
switch {
|
||||
case errors.Is(err, hexutil.ErrEmptyString):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
case latest == nil:
|
||||
return false, errors.New("latest block is nil")
|
||||
case latest.TotalDifficulty == "":
|
||||
return false, nil
|
||||
default:
|
||||
}
|
||||
latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if latestTtd.Cmp(ttd.ToBig()) >= 0 {
|
||||
return true, nil
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"latestDifficulty": latestTtd.String(),
|
||||
"terminalDifficulty": ttd.ToBig().String(),
|
||||
"network": params.BeaconConfig().ConfigName,
|
||||
}).Info("Ready for The Merge")
|
||||
|
||||
totalTerminalDifficulty.Set(float64(latestTtd.Uint64()))
|
||||
return false, nil
|
||||
}
|
||||
beacon-chain/execution/check_transition_config_test.go (new file, 260 lines)
@@ -0,0 +1,260 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func Test_checkTransitionConfiguration(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
t.Run("context canceled", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := &mocks.EngineClient{}
|
||||
m.Err = errors.New("something went wrong")
|
||||
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.cfg.stateNotifier = &mockChain.MockStateNotifier{}
|
||||
checkTransitionPollingInterval = time.Millisecond
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go srv.checkTransitionConfiguration(ctx, make(chan *feed.Event, 1))
|
||||
<-time.After(100 * time.Millisecond)
|
||||
cancel()
|
||||
require.LogsContain(t, hook, "Could not check configuration values")
|
||||
})
|
||||
|
||||
t.Run("block containing execution payload exits routine", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := &mocks.EngineClient{}
|
||||
m.Err = errors.New("something went wrong")
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.cfg.stateNotifier = &mockChain.MockStateNotifier{}
|
||||
|
||||
checkTransitionPollingInterval = time.Millisecond
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
exit := make(chan bool)
|
||||
notification := make(chan *feed.Event)
|
||||
go func() {
|
||||
srv.checkTransitionConfiguration(ctx, notification)
|
||||
exit <- true
|
||||
}()
|
||||
payload := emptyPayload()
|
||||
payload.GasUsed = 21000
|
||||
wrappedBlock, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: payload,
|
||||
},
|
||||
}},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
notification <- &feed.Event{
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
SignedBlock: wrappedBlock,
|
||||
},
|
||||
Type: statefeed.BlockProcessed,
|
||||
}
|
||||
<-exit
|
||||
cancel()
|
||||
require.LogsContain(t, hook, "PoS transition is complete, no longer checking")
|
||||
})
|
||||
}
|
||||
|
||||
func TestService_handleExchangeConfigurationError(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
t.Run("clears existing service error", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.runError = ErrConfigMismatch
|
||||
srv.handleExchangeConfigurationError(nil)
|
||||
require.Equal(t, true, srv.Status() == nil)
|
||||
})
|
||||
t.Run("does not clear existing service error if wrong kind", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
err := errors.New("something else went wrong")
|
||||
srv.runError = err
|
||||
srv.handleExchangeConfigurationError(nil)
|
||||
require.ErrorIs(t, err, srv.Status())
|
||||
})
|
||||
t.Run("sets service error on config mismatch", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.handleExchangeConfigurationError(ErrConfigMismatch)
|
||||
require.Equal(t, ErrConfigMismatch, srv.Status())
|
||||
require.LogsContain(t, hook, configMismatchLog)
|
||||
})
|
||||
t.Run("does not set service error if unrelated problem", func(t *testing.T) {
|
||||
srv := setupTransitionConfigTest(t)
|
||||
srv.isRunning = true
|
||||
srv.handleExchangeConfigurationError(errors.New("foo"))
|
||||
require.Equal(t, true, srv.Status() == nil)
|
||||
require.LogsContain(t, hook, "Could not check configuration values")
|
||||
})
|
||||
}
|
||||
|
||||
func setupTransitionConfigTest(t testing.TB) *Service {
|
||||
fix := fixtures()
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal block hash.
|
||||
h := common.BytesToHash([]byte("foo"))
|
||||
resp.TerminalBlockHash = h[:]
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := &pb.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.Hash{},
|
||||
UncleHash: common.Hash{},
|
||||
Coinbase: common.Address{},
|
||||
Root: common.Hash{},
|
||||
TxHash: common.Hash{},
|
||||
ReceiptHash: common.Hash{},
|
||||
Bloom: gethtypes.Bloom{},
|
||||
Difficulty: big.NewInt(1),
|
||||
Number: big.NewInt(2),
|
||||
GasLimit: 3,
|
||||
GasUsed: 4,
|
||||
Time: 5,
|
||||
Extra: nil,
|
||||
MixDigest: common.Hash{},
|
||||
Nonce: gethtypes.BlockNonce{},
|
||||
BaseFee: big.NewInt(6),
|
||||
},
|
||||
TotalDifficulty: "0x12345678",
|
||||
}
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, reached)
|
||||
|
||||
reached, err = service.logTtdStatus(context.Background(), ttd.SetUint64(323423484))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func emptyPayload() *pb.ExecutionPayload {
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
}
|
||||
}
|
||||
@@ -39,6 +39,7 @@ var (
|
||||
ForkchoiceUpdatedMethodV2,
|
||||
GetPayloadMethod,
|
||||
GetPayloadMethodV2,
|
||||
ExchangeTransitionConfigurationMethod,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
}
|
||||
@@ -61,10 +62,12 @@ const (
|
||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
BlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||
// ExecutionBlockByHashMethod request string for JSON-RPC.
|
||||
ExecutionBlockByHashMethod = "eth_getBlockByHash"
|
||||
// ExecutionBlockByNumberMethod request string for JSON-RPC.
|
||||
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// GetPayloadBodiesByHashV1 v1 request string for JSON-RPC.
|
||||
GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
|
||||
// GetPayloadBodiesByRangeV1 v1 request string for JSON-RPC.
|
||||
@@ -86,7 +89,7 @@ type ForkchoiceUpdatedResponse struct {
|
||||
// ExecutionPayloadReconstructor defines a service that can reconstruct a full beacon
|
||||
// block with an execution payload from a signed beacon block and a connection
|
||||
// to an execution client's engine API.
|
||||
type PayloadReconstructor interface {
|
||||
type ExecutionPayloadReconstructor interface {
|
||||
ReconstructFullBlock(
|
||||
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (interfaces.SignedBeaconBlock, error)
|
||||
@@ -103,6 +106,9 @@ type EngineCaller interface {
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
|
||||
) (*pb.PayloadIDBytes, []byte, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error)
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||
}
|
||||
@@ -293,6 +299,51 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
return ed, nil, false, nil
|
||||
}
|
||||
|
||||
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
|
||||
func (s *Service) ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeTransitionConfiguration")
|
||||
defer span.End()
|
||||
|
||||
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
|
||||
zeroBigNum := big.NewInt(0)
|
||||
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
|
||||
d := time.Now().Add(defaultEngineTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.TransitionConfiguration{}
|
||||
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
|
||||
return handleRPCError(err)
|
||||
}
|
||||
|
||||
// We surface an error to the user if local configuration settings mismatch
|
||||
// according to the response from the execution node.
|
||||
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
|
||||
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
|
||||
return errors.Wrapf(
|
||||
ErrConfigMismatch,
|
||||
"got %#x from execution node, wanted %#x",
|
||||
result.TerminalBlockHash,
|
||||
cfgTerminalHash,
|
||||
)
|
||||
}
|
||||
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
|
||||
ttdResult, err := hexutil.DecodeBig(result.TerminalTotalDifficulty)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not decode received terminal total difficulty")
|
||||
}
|
||||
if ttdResult.String() != ttdCfg {
|
||||
return errors.Wrapf(
|
||||
ErrConfigMismatch,
|
||||
"got %s from execution node, wanted %s",
|
||||
ttdResult.String(),
|
||||
ttdCfg,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
@@ -412,7 +463,7 @@ func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,
|
||||
err := s.rpcClient.CallContext(
|
||||
ctx,
|
||||
result,
|
||||
BlockByNumberMethod,
|
||||
ExecutionBlockByNumberMethod,
|
||||
"latest",
|
||||
false, /* no full transaction objects */
|
||||
)
|
||||
@@ -425,7 +476,7 @@ func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash, wi
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExecutionBlockByHash")
|
||||
defer span.End()
|
||||
result := &pb.ExecutionBlock{}
|
||||
err := s.rpcClient.CallContext(ctx, result, BlockByHashMethod, hash, withTxs)
|
||||
err := s.rpcClient.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, withTxs)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
@@ -444,7 +495,7 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
||||
blk := &pb.ExecutionBlock{}
|
||||
newH := h
|
||||
elems = append(elems, gethRPC.BatchElem{
|
||||
Method: BlockByHashMethod,
|
||||
Method: ExecutionBlockByHashMethod,
|
||||
Args: []interface{}{newH, withTxs},
|
||||
Result: blk,
|
||||
Error: error(nil),
|
||||
@@ -466,7 +517,7 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
||||
// HeaderByHash returns the relevant header details for the provided block hash.
|
||||
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
|
||||
var hdr *types.HeaderInfo
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, BlockByHashMethod, hash, false /* no transactions */)
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, ExecutionBlockByHashMethod, hash, false /* no transactions */)
|
||||
if err == nil && hdr == nil {
|
||||
err = ethereum.NotFound
|
||||
}
|
||||
@@ -476,7 +527,7 @@ func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.He
|
||||
// HeaderByNumber returns the relevant header details for the provided block number.
|
||||
func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.HeaderInfo, error) {
|
||||
var hdr *types.HeaderInfo
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, BlockByNumberMethod, toBlockNumArg(number), false /* no transactions */)
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, ExecutionBlockByNumberMethod, toBlockNumArg(number), false /* no transactions */)
|
||||
if err == nil && hdr == nil {
|
||||
err = ethereum.NotFound
|
||||
}
|
||||
@@ -571,9 +622,9 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
if len(blindedBlocks) == 0 {
|
||||
return []interfaces.SignedBeaconBlock{}, nil
|
||||
}
|
||||
var executionHashes []common.Hash
|
||||
var validExecPayloads []int
|
||||
var zeroExecPayloads []int
|
||||
executionHashes := []common.Hash{}
|
||||
validExecPayloads := []int{}
|
||||
zeroExecPayloads := []int{}
|
||||
for i, b := range blindedBlocks {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/engine"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
@@ -71,6 +72,47 @@ func FuzzForkChoiceResponse(f *testing.F) {
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExchangeTransitionConfiguration(f *testing.F) {
|
||||
valHash := common.Hash([32]byte{0xFF, 0x01})
|
||||
ttd := hexutil.Big(*big.NewInt(math.MaxInt))
|
||||
seed := &engine.TransitionConfigurationV1{
|
||||
TerminalTotalDifficulty: &ttd,
|
||||
TerminalBlockHash: valHash,
|
||||
TerminalBlockNumber: hexutil.Uint64(math.MaxUint64),
|
||||
}
|
||||
|
||||
output, err := json.Marshal(seed)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &engine.TransitionConfigurationV1{}
|
||||
prysmResp := &pb.TransitionConfiguration{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
newGethResp := &engine.TransitionConfigurationV1{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
newGethResp2 := &engine.TransitionConfigurationV1{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExecutionPayload(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
execData := &engine.ExecutionPayloadEnvelope{
|
||||
|
||||
@@ -33,12 +33,13 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = PayloadReconstructor(&Service{})
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&Service{})
|
||||
_ = PayloadReconstructor(&Service{})
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&mocks.EngineClient{})
|
||||
)
|
||||
|
||||
@@ -134,14 +135,20 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
err := srv.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := srv.LatestExecutionBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
@@ -637,7 +644,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -667,7 +674,45 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
encodedReq, err := json.Marshal(want)
|
||||
require.NoError(t, err)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(encodedReq),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
err = client.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -1145,6 +1190,78 @@ func Test_tDStringToUint256(t *testing.T) {
|
||||
require.ErrorContains(t, "hex number > 256 bits", err)
|
||||
}
|
||||
|
||||
func TestExchangeTransitionConfiguration(t *testing.T) {
|
||||
fix := fixtures()
|
||||
ctx := context.Background()
|
||||
t.Run("wrong terminal block hash", func(t *testing.T) {
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal block hash.
|
||||
h := common.BytesToHash([]byte("foo"))
|
||||
resp.TerminalBlockHash = h[:]
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
err = service.ExchangeTransitionConfiguration(ctx, request)
|
||||
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
|
||||
})
|
||||
t.Run("wrong terminal total difficulty", func(t *testing.T) {
|
||||
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
// Change the terminal total difficulty.
|
||||
resp.TerminalTotalDifficulty = "0x1"
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
err = service.ExchangeTransitionConfiguration(ctx, request)
|
||||
require.Equal(t, true, errors.Is(err, ErrConfigMismatch))
|
||||
})
|
||||
}
|
||||
|
||||
type customError struct {
|
||||
code int
|
||||
timeout bool
|
||||
@@ -1432,6 +1549,13 @@ func fixtures() map[string]interface{} {
|
||||
},
|
||||
PayloadId: &id,
|
||||
}
|
||||
b, _ := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
ttd, _ := uint256.FromBig(b)
|
||||
transitionCfg := &pb.TransitionConfiguration{
|
||||
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
|
||||
TerminalTotalDifficulty: ttd.Hex(),
|
||||
TerminalBlockNumber: big.NewInt(0).Bytes(),
|
||||
}
|
||||
validStatus := &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_VALID,
|
||||
LatestValidHash: foo[:],
|
||||
@@ -1474,6 +1598,7 @@ func fixtures() map[string]interface{} {
|
||||
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
|
||||
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
|
||||
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
|
||||
"TransitionConfiguration": transitionCfg,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1780,6 +1905,17 @@ func (*testEngineService) GetPayloadV2(
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) ExchangeTransitionConfigurationV1(
|
||||
_ context.Context, _ *pb.TransitionConfiguration,
|
||||
) *pb.TransitionConfiguration {
|
||||
fix := fixtures()
|
||||
item, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) ForkchoiceUpdatedV1(
|
||||
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
|
||||
) *ForkchoiceUpdatedResponse {
|
||||
|
||||
@@ -32,7 +32,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
depositEventSignature = hash.Keccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
|
||||
depositEventSignature = hash.HashKeccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
|
||||
)
|
||||
|
||||
const eth1DataSavingInterval = 1000
|
||||
|
||||
@@ -6,6 +6,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
totalTerminalDifficulty = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_terminal_difficulty",
|
||||
Help: "The total terminal difficulty of the execution chain before merge",
|
||||
})
|
||||
newPayloadLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "new_payload_v1_latency_milliseconds",
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
@@ -207,10 +208,14 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
}
|
||||
}
|
||||
|
||||
eth1Data, err := s.validPowchainData(ctx)
|
||||
if err != nil {
|
||||
if err := s.ensureValidPowchainData(ctx); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to validate powchain data")
|
||||
}
|
||||
|
||||
eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
}
|
||||
if err := s.initializeEth1Data(ctx, eth1Data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -241,6 +246,9 @@ func (s *Service) Start() {
|
||||
// Poll the execution client connection and fallback if errors occur.
|
||||
s.pollConnectionStatus(s.ctx)
|
||||
|
||||
// Check transition configuration for the engine API client in the background.
|
||||
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
|
||||
|
||||
go s.run(s.ctx.Done())
|
||||
}
|
||||
|
||||
@@ -750,10 +758,6 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
|
||||
return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
|
||||
"Run with the --%s flag to resume normal operations.", features.EnableEIP4881.Name)
|
||||
}
|
||||
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -818,22 +822,23 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
|
||||
|
||||
// Validates the current powchain data is saved and makes sure that any
|
||||
// embedded genesis state is correctly accounted for.
|
||||
func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error) {
|
||||
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
|
||||
genState, err := s.cfg.beaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
// Exit early if no genesis state is saved.
|
||||
if genState == nil || genState.IsNil() {
|
||||
return nil
|
||||
}
|
||||
eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
}
|
||||
if genState == nil || genState.IsNil() {
|
||||
return eth1Data, nil
|
||||
return errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
}
|
||||
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
|
||||
pbState, err := native.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
s.chainStartData = ðpb.ChainStartData{
|
||||
Chainstarted: true,
|
||||
@@ -851,24 +856,22 @@ func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData,
|
||||
if features.Get().EnableEIP4881 {
|
||||
trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
|
||||
if !ok {
|
||||
return nil, errors.New("deposit trie was not EIP4881 DepositTree")
|
||||
return errors.New("deposit trie was not EIP4881 DepositTree")
|
||||
}
|
||||
eth1Data.DepositSnapshot, err = trie.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
trie, ok := s.depositTrie.(*trie.SparseMerkleTrie)
|
||||
if !ok {
|
||||
return nil, errors.New("deposit trie was not SparseMerkleTrie")
|
||||
return errors.New("deposit trie was not SparseMerkleTrie")
|
||||
}
|
||||
eth1Data.Trie = trie.ToProto()
|
||||
}
|
||||
if err := s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data)
|
||||
}
|
||||
return eth1Data, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func dedupEndpoints(endpoints []string) []string {
|
||||
|
||||
@@ -571,8 +571,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
@@ -602,8 +601,7 @@ func TestService_InitializeCorrectly(t *testing.T) {
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
@@ -638,8 +636,7 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
|
||||
DepositContainers: []*ethpb.DepositContainer{{Index: 1}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -83,6 +83,11 @@ func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slo
|
||||
return p, nil, e.BuilderOverride, e.ErrGetPayload
|
||||
}
|
||||
|
||||
// ExchangeTransitionConfiguration --
|
||||
func (e *EngineClient) ExchangeTransitionConfiguration(_ context.Context, _ *pb.TransitionConfiguration) error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// LatestExecutionBlock --
|
||||
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlock, error) {
|
||||
return e.ExecutionBlock, e.ErrLatestExecBlock
|
||||
|
||||
@@ -13,7 +13,10 @@ func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
|
||||
if err != nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch <= epoch {
|
||||
if headEpoch < epoch {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch == epoch {
|
||||
return head.root
|
||||
}
|
||||
for head != nil && head.slot > epochEnd {
|
||||
|
||||
@@ -35,5 +35,5 @@ func TestLastRoot(t *testing.T) {
|
||||
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
|
||||
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
|
||||
require.Equal(t, [32]byte{}, f.LastRoot(2))
|
||||
}
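Taken together with the test change, the split into `headEpoch < epoch` and `headEpoch == epoch` checks means that requesting an epoch beyond the head's epoch yields the zero root (hence the LastRoot(2) expectation of [32]byte{}), while the head's own epoch still yields head.root (LastRoot(1) remains '6').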
|
||||
|
||||
@@ -49,7 +49,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot := primitives.Slot(1)
driftGenesisTime(f, slot, 0)
newRoot := indexToHash(1)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err := prepareForkchoiceState(
ctx,
slot,
@@ -75,7 +74,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = primitives.Slot(2)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(2)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err = prepareForkchoiceState(
ctx,
slot,
@@ -103,7 +101,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = primitives.Slot(3)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(3)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err = prepareForkchoiceState(
ctx,
slot,
@@ -132,7 +129,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = primitives.Slot(4)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(4)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err = prepareForkchoiceState(
ctx,
slot,
@@ -339,7 +335,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot := primitives.Slot(2)
driftGenesisTime(f, cSlot, 0)
c := indexToHash(2)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err := prepareForkchoiceState(
ctx,
cSlot,
@@ -359,7 +354,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {

bSlot := primitives.Slot(1)
b := indexToHash(1)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err = prepareForkchoiceState(
ctx,
bSlot,
@@ -384,7 +378,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// A block D, building on B, is received at slot N+3. It should not be able to win without boosting.
dSlot := primitives.Slot(3)
d := indexToHash(3)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err = prepareForkchoiceState(
ctx,
dSlot,
@@ -405,7 +398,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// If the same block arrives with boosting then it becomes head:
driftGenesisTime(f, dSlot, 0)
d2 := indexToHash(30)
f.store.proposerBoostRoot = [32]byte{}
state, blkRoot, err = prepareForkchoiceState(
ctx,
dSlot,

@@ -109,8 +109,7 @@ func (s *Store) insert(ctx context.Context,
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
isFirstBlock := s.proposerBoostRoot == [32]byte{}
if currentSlot == slot && secondsIntoSlot < boostThreshold && isFirstBlock {
if currentSlot == slot && secondsIntoSlot < boostThreshold {
s.proposerBoostRoot = root
}
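
The insert hunk above concerns when a freshly received block earns the proposer boost: the node must still be in the block's slot and within the first interval of it (SecondsPerSlot / IntervalsPerSlot); the two sides of the diff differ only in whether an extra is-first-block guard on proposerBoostRoot is applied. A small self-contained sketch of the timing check, with hard-coded mainnet-style constants standing in for params.BeaconConfig():

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot   = 12 // stand-ins for params.BeaconConfig() values
	intervalsPerSlot = 3
)

// qualifiesForBoost reports whether a block for `slot`, seen at `now`,
// arrives early enough in its own slot to receive the proposer boost.
func qualifiesForBoost(genesis, now time.Time, slot uint64) bool {
	elapsed := uint64(now.Sub(genesis).Seconds())
	currentSlot := elapsed / secondsPerSlot
	secondsIntoSlot := elapsed % secondsPerSlot
	boostThreshold := uint64(secondsPerSlot / intervalsPerSlot) // first 4s of the slot
	return currentSlot == slot && secondsIntoSlot < boostThreshold
}

func main() {
	genesis := time.Now().Add(-26 * time.Second)           // we are ~2s into slot 2
	fmt.Println(qualifiesForBoost(genesis, time.Now(), 2)) // true: within the first interval
	fmt.Println(qualifiesForBoost(genesis, time.Now(), 1)) // false: block is for an earlier slot
}
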
@@ -52,6 +52,7 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
}
if flags.EnableHTTPEthAPI(httpModules) {
ethRegistrations := []gateway.PbHandlerRegistration{
ethpbservice.RegisterBeaconNodeHandler,
ethpbservice.RegisterBeaconChainHandler,
ethpbservice.RegisterBeaconValidatorHandler,
ethpbservice.RegisterEventsHandler,

@@ -15,7 +15,7 @@ func TestDefaultConfig(t *testing.T) {
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
assert.Equal(t, 3, len(cfg.EthPbMux.Registrations))
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
@@ -29,7 +29,7 @@ func TestDefaultConfig(t *testing.T) {
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
assert.Equal(t, 5, len(cfg.EthPbMux.Registrations))
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
@@ -41,7 +41,7 @@ func TestDefaultConfig(t *testing.T) {
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
assert.Equal(t, 5, len(cfg.EthPbMux.Registrations))
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
})
t.Run("Without Eth API", func(t *testing.T) {

@@ -16,7 +16,6 @@ go_library(
],
deps = [
"//api/gateway:go_default_library",
"//api/server:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",

@@ -18,7 +18,6 @@ import (
"github.com/gorilla/mux"
"github.com/pkg/errors"
apigateway "github.com/prysmaticlabs/prysm/v4/api/gateway"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
@@ -154,9 +153,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
if err := kv.ConfigureBlobRetentionEpoch(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()

// Initializes any forks here.
@@ -272,7 +268,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {

log.Debugln("Registering RPC Service")
router := mux.NewRouter()
router.Use(server.NormalizeQueryValuesHandler)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}
@@ -295,9 +290,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
}

// db.DatabasePath is the path to the containing directory
// db.NewFileName expands that to the canonical full path using
// db.NewDBFilename expands that to the canonical full path using
// the same construction as NewDB()
c, err := newBeaconNodePromCollector(db.NewFileName(beacon.db.DatabasePath()))
c, err := newBeaconNodePromCollector(db.NewDBFilename(beacon.db.DatabasePath()))
if err != nil {
return nil, err
}
@@ -514,7 +509,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
}

func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithBackfillStatus(bfs)}
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, fc, opts...)

cp, err := b.db.FinalizedCheckpoint(ctx)
@@ -713,10 +708,9 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
regularsync.WithPayloadReconstructor(web3Service),
regularsync.WithExecutionPayloadReconstructor(web3Service),
regularsync.WithClockWaiter(b.clockWaiter),
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
)
return b.services.RegisterService(rs)
}

@@ -32,6 +32,7 @@ go_test(
name = "go_default_test",
srcs = [
"aggregated_test.go",
"benchmark_test.go",
"block_test.go",
"forkchoice_test.go",
"seen_bits_test.go",

beacon-chain/operations/attestations/kv/benchmark_test.go (20 lines, Normal file)
@@ -0,0 +1,20 @@
package kv_test

import (
"testing"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations/kv"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
)

func BenchmarkAttCaches(b *testing.B) {
ac := kv.NewAttCaches()

att := &ethpb.Attestation{}

for i := 0; i < b.N; i++ {
assert.NoError(b, ac.SaveUnaggregatedAttestation(att))
assert.NoError(b, ac.DeleteAggregatedAttestation(att))
}
}
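
The new BenchmarkAttCaches above measures the save-unaggregated / delete-aggregated round trip of the attestation cache. It can be run in isolation with the standard Go tooling, e.g. go test -run='^$' -bench=BenchmarkAttCaches ./beacon-chain/operations/attestations/kv/ (or through the project's equivalent Bazel test target).
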
@@ -13,7 +13,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

var hashFn = hash.Proto
var hashFn = hash.HashProto

// AttCaches defines the caches used to satisfy attestation pool interface.
// These caches are KV store for various attestations

@@ -119,7 +119,7 @@ func (s *Service) aggregateAndSaveForkChoiceAtts(atts []*ethpb.Attestation) erro
// This checks if the attestation has previously been aggregated for fork choice
// return true if yes, false if no.
func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
attRoot, err := hash.Proto(att.Data)
attRoot, err := hash.HashProto(att.Data)
if err != nil {
return false, err
}

@@ -74,8 +74,7 @@ func (s *Store) SaveSyncCommitteeMessage(msg *ethpb.SyncCommitteeMessage) error
}

// SyncCommitteeMessages returns sync committee messages by slot from the priority queue.
// When calling this method a copy is avoided as the caller is assumed to be only reading the
// messages from the store rather than modifying it.
// Upon retrieval, the message is removed from the queue.
func (s *Store) SyncCommitteeMessages(slot primitives.Slot) ([]*ethpb.SyncCommitteeMessage, error) {
s.messageLock.RLock()
defer s.messageLock.RUnlock()

@@ -461,7 +461,7 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
}

func peerIdsFromMultiAddrs(addrs []ma.Multiaddr) []peer.ID {
var peers []peer.ID
peers := []peer.ID{}
for _, a := range addrs {
info, err := peer.AddrInfoFromP2pAddr(a)
if err != nil {

@@ -118,7 +118,7 @@ func (s *Store) SetTrustedPeers(peers []peer.ID) {
// GetTrustedPeers gets our desired trusted peer ids.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) GetTrustedPeers() []peer.ID {
var peers []peer.ID
peers := []peer.ID{}
for p := range s.trustedPeers {
peers = append(peers, p)
}

@@ -788,7 +788,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}

var trustedPeers []peer.ID
trustedPeers := []peer.ID{}
// Set up bad scores for inbound peers.
inboundPeers := p.InboundConnected()
for i, pid := range inboundPeers {
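
Several of the hunks above swap `var peers []peer.ID` declarations for explicit empty-slice literals (or vice versa). The two forms behave identically under append and len, but they become distinguishable once the slice is compared to nil or serialized, which is typically what motivates such a change. A standalone illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []string    // declared but nil
	emptySlice := []string{} // allocated, length zero

	fmt.Println(nilSlice == nil, emptySlice == nil) // true false
	fmt.Println(len(nilSlice), len(emptySlice))     // 0 0

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a), string(b)) // null []

	// append works the same on both forms.
	fmt.Println(append(nilSlice, "x"), append(emptySlice, "x")) // [x] [x]
}
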
@@ -2,8 +2,6 @@ package testing

import (
"context"
"sync"
"sync/atomic"

ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
@@ -11,53 +9,33 @@ import (

// MockBroadcaster implements p2p.Broadcaster for testing.
type MockBroadcaster struct {
BroadcastCalled atomic.Bool
BroadcastCalled bool
BroadcastMessages []proto.Message
BroadcastAttestations []*ethpb.Attestation
msgLock sync.Mutex
attLock sync.Mutex
}

// Broadcast records a broadcast occurred.
func (m *MockBroadcaster) Broadcast(_ context.Context, msg proto.Message) error {
m.BroadcastCalled.Store(true)
m.msgLock.Lock()
defer m.msgLock.Unlock()
m.BroadcastCalled = true
m.BroadcastMessages = append(m.BroadcastMessages, msg)
return nil
}

// BroadcastAttestation records a broadcast occurred.
func (m *MockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, a *ethpb.Attestation) error {
m.BroadcastCalled.Store(true)
m.attLock.Lock()
defer m.attLock.Unlock()
m.BroadcastCalled = true
m.BroadcastAttestations = append(m.BroadcastAttestations, a)
return nil
}

// BroadcastSyncCommitteeMessage records a broadcast occurred.
func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
m.BroadcastCalled.Store(true)
m.BroadcastCalled = true
return nil
}

// BroadcastBlob broadcasts a blob for mock.
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
m.BroadcastCalled.Store(true)
m.BroadcastCalled = true
return nil
}

// NumMessages returns the number of messages broadcasted.
func (m *MockBroadcaster) NumMessages() int {
m.msgLock.Lock()
defer m.msgLock.Unlock()
return len(m.BroadcastMessages)
}

// NumAttestations returns the number of attestations broadcasted.
func (m *MockBroadcaster) NumAttestations() int {
m.attLock.Lock()
defer m.attLock.Unlock()
return len(m.BroadcastAttestations)
}

@@ -6,7 +6,6 @@ import (
"bytes"
"context"
"fmt"
"sync/atomic"
"testing"
"time"

@@ -42,7 +41,7 @@ type TestP2P struct {
BHost host.Host
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
BroadcastCalled atomic.Bool
BroadcastCalled bool
DelaySend bool
Digest [4]byte
peers *peers.Status
@@ -161,25 +160,25 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {

// Broadcast a message.
func (p *TestP2P) Broadcast(_ context.Context, _ proto.Message) error {
p.BroadcastCalled.Store(true)
p.BroadcastCalled = true
return nil
}

// BroadcastAttestation broadcasts an attestation.
func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
p.BroadcastCalled.Store(true)
p.BroadcastCalled = true
return nil
}

// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
p.BroadcastCalled.Store(true)
p.BroadcastCalled = true
return nil
}

// BroadcastBlob broadcasts a blob for mock.
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
p.BroadcastCalled.Store(true)
p.BroadcastCalled = true
return nil
}
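
The MockBroadcaster and TestP2P hunks above differ in whether the BroadcastCalled flag is a plain bool or a sync/atomic Bool kept alongside mutex-protected message slices; the atomic form is what keeps these test doubles safe when tests broadcast from several goroutines under -race. A condensed sketch of that pattern, independent of the p2p interfaces:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// recorder is a race-safe test double: a lock-free "called" flag plus a
// mutex-guarded slice of recorded messages, mirroring the mock layout above.
type recorder struct {
	called   atomic.Bool
	mu       sync.Mutex
	messages []string
}

func (r *recorder) Broadcast(msg string) {
	r.called.Store(true)
	r.mu.Lock()
	defer r.mu.Unlock()
	r.messages = append(r.messages, msg)
}

func (r *recorder) NumMessages() int {
	r.mu.Lock()
	defer r.mu.Unlock()
	return len(r.messages)
}

func main() {
	r := &recorder{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r.Broadcast(fmt.Sprintf("msg-%d", i))
		}(i)
	}
	wg.Wait()
	fmt.Println(r.called.Load(), r.NumMessages()) // true 8
}
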
@@ -16,6 +16,7 @@ go_library(
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network/http:go_default_library",
@@ -32,7 +33,6 @@ go_test(
srcs = [
"custom_handlers_test.go",
"custom_hooks_test.go",
"endpoint_factory_test.go",
"structs_marshalling_test.go",
],
embed = [":go_default_library"],

@@ -25,6 +25,64 @@ type sszConfig struct {
responseJson SszResponse
}

func handleGetBeaconStateSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "beacon_state.ssz",
responseJson: &SszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "beacon_block.ssz",
responseJson: &SszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconStateSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "beacon_state.ssz",
responseJson: &VersionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconBlockSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "beacon_block.ssz",
responseJson: &VersionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBlindedBeaconBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
config := sszConfig{
fileName: "beacon_block.ssz",
responseJson: &VersionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleSubmitBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
return handlePostSSZ(m, endpoint, w, req)
}

func handleSubmitBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
return handlePostSSZ(m, endpoint, w, req)
}

func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "produce_beacon_block.ssz",
@@ -53,7 +111,11 @@ func handleGetSSZ(
req *http.Request,
config sszConfig,
) (handled bool) {
ssz := http2.SszRequested(req)
ssz, err := http2.SszRequested(req)
if err != nil {
apimiddleware.WriteError(w, apimiddleware.InternalServerError(err), nil)
return true
}
if !ssz {
return false
}
@@ -101,6 +163,58 @@ func handleGetSSZ(
return true
}
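
The handleGetSSZ hunk above changes SszRequested from returning a bare bool to returning (bool, error), so a malformed Accept header surfaces as an internal-server error instead of being silently ignored. A hypothetical sketch of such a negotiation helper; the real function lives in Prysm's network/http package and may parse the header differently, and the constant name below is an assumption:

package main

import (
	"fmt"
	"mime"
	"net/http"
	"strings"
)

const octetStreamMediaType = "application/octet-stream" // assumed constant name for the sketch

// sszRequested reports whether the client asked for an SSZ (octet-stream)
// response, and returns an error when an Accept entry cannot be parsed.
func sszRequested(r *http.Request) (bool, error) {
	for _, accept := range r.Header.Values("Accept") {
		for _, part := range strings.Split(accept, ",") {
			mediaType, _, err := mime.ParseMediaType(strings.TrimSpace(part))
			if err != nil {
				return false, err
			}
			if mediaType == octetStreamMediaType {
				return true, nil
			}
		}
	}
	return false, nil
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://localhost/eth/v2/debug/beacon/states/head", nil)
	req.Header.Set("Accept", "application/octet-stream")
	ok, err := sszRequested(req)
	fmt.Println(ok, err) // true <nil>
}
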
func handlePostSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
if !sszPosted(req) {
return false
}

if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
prepareCustomHeaders(req)
if errJson := preparePostedSSZData(req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
if respHasError {
return true
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

return true
}

func sszPosted(req *http.Request) bool {
ct, ok := req.Header["Content-Type"]
if !ok {
return false
}
if len(ct) != 1 {
return false
}
return ct[0] == api.OctetStreamMediaType
}

func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
@@ -116,6 +230,14 @@ func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint
return nil
}

func prepareCustomHeaders(req *http.Request) {
ver := req.Header.Get(api.VersionHeader)
if ver != "" {
req.Header.Del(api.VersionHeader)
req.Header.Add(grpc.WithPrefix(api.VersionHeader), ver)
}
}

func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
buf, err := io.ReadAll(req.Body)
if err != nil {
@@ -268,8 +390,6 @@ func receiveEvents(eventChan <-chan *sse.Event, w http.ResponseWriter, req *http
default:
return apimiddleware.InternalServerError(errors.New("payload version unsupported"))
}
case events.BlobSidecarTopic:
data = &EventBlobSidecarJson{}
case "error":
data = &EventErrorJson{}
default:

@@ -19,6 +19,26 @@ import (

// https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolBLSToExecutionChange
// expects posting a top-level array. We make it more proto-friendly by wrapping it in a struct.
func wrapBLSChangesArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitBLSToExecutionChangesRequest); !ok {
return true, nil
}
changes := make([]*SignedBLSToExecutionChangeJson, 0)
if err := json.NewDecoder(req.Body).Decode(&changes); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitBLSToExecutionChangesRequest{Changes: changes}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
return true, nil
}
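
wrapBLSChangesArray above exists because the Beacon API endpoint posts a bare top-level JSON array, which grpc-gateway cannot map onto a proto message, so the hook re-reads the body and nests the array under a field. The same trick in isolation, with hypothetical local types standing in for the generated *Json structs:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

type change struct {
	ValidatorIndex string `json:"validator_index"`
}

type wrappedRequest struct {
	Changes []*change `json:"changes"`
}

// wrapTopLevelArray rewrites a request whose body is `[...]` into `{"changes": [...]}`.
func wrapTopLevelArray(req *http.Request) error {
	changes := make([]*change, 0)
	if err := json.NewDecoder(req.Body).Decode(&changes); err != nil {
		return err
	}
	b, err := json.Marshal(&wrappedRequest{Changes: changes})
	if err != nil {
		return err
	}
	req.Body = io.NopCloser(bytes.NewReader(b))
	return nil
}

func main() {
	body := bytes.NewBufferString(`[{"validator_index":"1"},{"validator_index":"2"}]`)
	req, _ := http.NewRequest(http.MethodPost, "http://localhost/eth/v1/beacon/pool/bls_to_execution_changes", body)
	if err := wrapTopLevelArray(req); err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(req.Body)
	fmt.Println(string(out)) // {"changes":[{"validator_index":"1"},{"validator_index":"2"}]}
}
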
type v1alpha1SignedPhase0Block struct {
Block *BeaconBlockJson `json:"block"` // tech debt on phase 0 called this block instead of "message"
@@ -279,6 +299,306 @@ func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.Respo
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}

type tempSyncCommitteesResponseJson struct {
Data *tempSyncCommitteeValidatorsJson `json:"data"`
}

type tempSyncCommitteeValidatorsJson struct {
Validators []string `json:"validators"`
ValidatorAggregates []*tempSyncSubcommitteeValidatorsJson `json:"validator_aggregates"`
}

type tempSyncSubcommitteeValidatorsJson struct {
Validators []string `json:"validators"`
}

// https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.0.0#/Beacon/getEpochSyncCommittees returns validator_aggregates as a nested array.
// grpc-gateway returns a struct with nested fields which we have to transform into a plain 2D array.
func prepareValidatorAggregates(body []byte, responseContainer interface{}) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
tempContainer := &tempSyncCommitteesResponseJson{}
if err := json.Unmarshal(body, tempContainer); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal response into temp container")
}
container, ok := responseContainer.(*SyncCommitteesResponseJson)
if !ok {
return false, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}

container.Data = &SyncCommitteeValidatorsJson{}
container.Data.Validators = tempContainer.Data.Validators
container.Data.ValidatorAggregates = make([][]string, len(tempContainer.Data.ValidatorAggregates))
for i, srcValAgg := range tempContainer.Data.ValidatorAggregates {
dstValAgg := make([]string, len(srcValAgg.Validators))
copy(dstValAgg, tempContainer.Data.ValidatorAggregates[i].Validators)
container.Data.ValidatorAggregates[i] = dstValAgg
}

return false, nil
}
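
prepareValidatorAggregates above flattens grpc-gateway's struct-of-structs (each {"validators": [...]} entry) into the plain 2D array the Beacon API spec expects for validator_aggregates. A stripped-down version of the same transformation on raw JSON, using hypothetical local types in place of the generated containers:

package main

import (
	"encoding/json"
	"fmt"
)

type nestedAggregate struct {
	Validators []string `json:"validators"`
}

type nestedResponse struct {
	Data struct {
		Validators          []string           `json:"validators"`
		ValidatorAggregates []*nestedAggregate `json:"validator_aggregates"`
	} `json:"data"`
}

type flatResponse struct {
	Data struct {
		Validators          []string   `json:"validators"`
		ValidatorAggregates [][]string `json:"validator_aggregates"`
	} `json:"data"`
}

func main() {
	body := []byte(`{"data":{"validators":["1","2","3","4"],"validator_aggregates":[{"validators":["1","2"]},{"validators":["3","4"]}]}}`)

	var in nestedResponse
	if err := json.Unmarshal(body, &in); err != nil {
		panic(err)
	}

	// Copy each nested single-field struct into a plain 2D array.
	var out flatResponse
	out.Data.Validators = in.Data.Validators
	out.Data.ValidatorAggregates = make([][]string, len(in.Data.ValidatorAggregates))
	for i, agg := range in.Data.ValidatorAggregates {
		out.Data.ValidatorAggregates[i] = append([]string(nil), agg.Validators...)
	}

	b, _ := json.Marshal(out)
	fmt.Println(string(b)) // {"data":{"validators":["1","2","3","4"],"validator_aggregates":[["1","2"],["3","4"]]}}
}
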
type phase0BlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type altairBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockAltairJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type bellatrixBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockBellatrixJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type capellaBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBeaconBlockCapellaJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type denebBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBeaconBlockDenebJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type bellatrixBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockBellatrixJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type capellaBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockCapellaJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type denebBlindedBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBlindedBeaconBlockDenebJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BlockV2ResponseJson)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}

var actualRespContainer interface{}
switch {
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0BlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockJson{
Message: respContainer.Data.Phase0Block,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockAltairJson{
Message: respContainer.Data.AltairBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockBellatrixJson{
Message: respContainer.Data.BellatrixBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockCapellaJson{
Message: respContainer.Data.CapellaBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockDenebJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}

j, err := json.Marshal(actualRespContainer)
if err != nil {
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
}
return false, j, nil
}

func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BlindedBlockResponseJson)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}

var actualRespContainer interface{}
switch {
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0BlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockJson{
Message: respContainer.Data.Phase0Block,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockAltairJson{
Message: respContainer.Data.AltairBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockBellatrixJson{
Message: respContainer.Data.BellatrixBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockCapellaJson{
Message: respContainer.Data.CapellaBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockDenebJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}

j, err := json.Marshal(actualRespContainer)
if err != nil {
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
}
return false, j, nil
}

type phase0StateResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconStateJson `json:"data"`
}

type altairStateResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconStateAltairJson `json:"data"`
}

type bellatrixStateResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconStateBellatrixJson `json:"data"`
}

type capellaStateResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconStateCapellaJson `json:"data"`
}

type denebStateResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconStateDenebJson `json:"data"`
}

func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BeaconStateV2ResponseJson)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}

var actualRespContainer interface{}
switch {
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0StateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.Phase0State,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.AltairState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.BellatrixState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.CapellaState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebState,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
}

j, err := json.Marshal(actualRespContainer)
if err != nil {
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
}
return false, j, nil
}

type phase0ProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockJson `json:"data"`

Some files were not shown because too many files have changed in this diff.