Compare commits


1 Commit

Author SHA1 Message Date
Potuz
7614707f9b Community requested feature flags
Following up on internal Slack discussions, this PR implements several
of the community-requested feature flags.

- Adds a flag `--optimistic-execution` to allow the beacon node to run
  without an EL connected. Proposers **must** use an external builder.
  This yields a storage reduction of approximately 1.4 TB on my locally
  tested node.
- Adds a flag `--blobs-are-available` to disable blob gossip, RPC
  requests, and data availability validation. This yields roughly a 70%
  bandwidth reduction and saves an extra 300 GB of storage.
- Adds a flag `--allow-deep-reorgs`, which removes the 1-slot limit for
  the honest-reorg feature. It should be safe to reorg longer branches
  as long as they have not attracted heavy attestation weight. This
  allows for larger MEV extraction when previous proposers were slow or
  deliberately broadcast their blocks late.

Other commonly requested flags are also added, but since they are not
thoroughly tested yet, I decided to leave them hidden. For the reviewer:
if you think they are safe enough, we can change their visibility.
2024-04-01 07:07:30 -03:00
941 changed files with 36266 additions and 61096 deletions
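
The commit message above names three boolean CLI flags. As a rough illustration only, the sketch below shows how such flags could be declared with `urfave/cli/v2`, the CLI library Prysm's binaries are built on. Only the flag names come from the commit message; the usage strings, app wiring, and log line are assumptions and are not taken from this PR's diff.

```go
package main

// Hypothetical sketch: declaring the boolean flags named in the commit message
// with urfave/cli/v2. Everything except the flag names is illustrative.

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

var (
	optimisticExecutionFlag = &cli.BoolFlag{
		Name:  "optimistic-execution",
		Usage: "Run the beacon node without an execution client; proposers must use an external builder.",
	}
	blobsAreAvailableFlag = &cli.BoolFlag{
		Name:  "blobs-are-available",
		Usage: "Disable blob gossip, RPC requests, and data availability validation.",
	}
	allowDeepReorgsFlag = &cli.BoolFlag{
		Name:  "allow-deep-reorgs",
		Usage: "Remove the 1-slot limit for the honest proposer reorg feature.",
	}
)

func main() {
	app := &cli.App{
		Name:  "beacon-chain",
		Flags: []cli.Flag{optimisticExecutionFlag, blobsAreAvailableFlag, allowDeepReorgsFlag},
		Action: func(ctx *cli.Context) error {
			// Downstream components would read these booleans from the cli context.
			log.Printf("optimistic-execution=%v blobs-are-available=%v allow-deep-reorgs=%v",
				ctx.Bool(optimisticExecutionFlag.Name),
				ctx.Bool(blobsAreAvailableFlag.Name),
				ctx.Bool(allowDeepReorgsFlag.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

Under those assumptions, an operator would toggle the behavior with, for example, `beacon-chain --allow-deep-reorgs`; the actual wiring for this PR lives in the file diffs below.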

View File

@@ -12,7 +12,8 @@
build:remote-cache --remote_download_minimal
build:remote-cache --remote_build_event_upload=minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache

View File

@@ -1,4 +1,3 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
@@ -287,14 +286,3 @@ sh_binary(
srcs = ["prysm.sh"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = ["derp.go"],
importpath = "github.com/prysmaticlabs/prysm/v5",
visibility = ["//visibility:public"],
deps = [
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

MODULE.bazel.lock (generated), 21 changed lines
View File

@@ -1123,27 +1123,6 @@
"recordedRepoMappingEntries": []
}
},
"@@bazel_tools//tools/test:extensions.bzl%remote_coverage_tools_extension": {
"general": {
"bzlTransitiveDigest": "l5mcjH2gWmbmIycx97bzI2stD0Q0M5gpDc0aLOHKIm8=",
"recordedFileInputs": {},
"recordedDirentsInputs": {},
"envVariables": {},
"generatedRepoSpecs": {
"remote_coverage_tools": {
"bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl",
"ruleClassName": "http_archive",
"attributes": {
"sha256": "7006375f6756819b7013ca875eab70a541cf7d89142d9c511ed78ea4fefa38af",
"urls": [
"https://mirror.bazel.build/bazel_coverage_output_generator/releases/coverage_output_generator-v2.6.zip"
]
}
}
},
"recordedRepoMappingEntries": []
}
},
"@@rules_java~//java:extensions.bzl%toolchains": {
"general": {
"bzlTransitiveDigest": "tJHbmWnq7m+9eUBnUdv7jZziQ26FmcGL9C5/hU3Q9UQ=",

View File

@@ -29,7 +29,23 @@ http_archive(
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
zig_toolchains()
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
)
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -227,7 +243,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.2"
consensus_spec_version = "v1.4.0"
bls_test_version = "v0.1.1"
@@ -243,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +291,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +306,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -326,6 +342,22 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """

View File

@@ -1,24 +1,11 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"constants.go",
"headers.go",
"jwt.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api",
visibility = ["//visibility:public"],
deps = [
"//crypto/rand:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["jwt_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -36,25 +36,20 @@ func (n *NodeHealthTracker) IsHealthy() bool {
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.Lock()
defer n.Unlock()
n.RLock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
n.RUnlock()
if isStatusChanged {
// Update the health status
n.Lock()
// Double-check the condition to ensure it hasn't changed since the first check.
n.isHealthy = &newStatus
// Send the new status to the health channel, potentially overwriting the existing value
select {
case <-n.healthChan:
n.healthChan <- newStatus
default:
n.healthChan <- newStatus
}
n.Unlock() // It's better to unlock as soon as the protected section is over.
n.healthChan <- newStatus
}
return newStatus
}

View File

@@ -87,15 +87,21 @@ func TestNodeHealth_Concurrency(t *testing.T) {
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
go func() {
for range n.HealthUpdates() {
// Consume values to avoid blocking on channel send.
}
}()
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
n.CheckHealth(context.Background())
}()
}

View File

@@ -3,7 +3,6 @@ package testing
import (
"context"
"reflect"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
@@ -17,7 +16,6 @@ var (
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
sync.Mutex
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
@@ -27,8 +25,6 @@ type MockHealthClientMockRecorder struct {
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.Lock()
defer m.Unlock()
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
@@ -45,8 +41,6 @@ func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.Lock()
defer mr.mock.Unlock()
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}

View File

@@ -11,7 +11,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
@@ -29,7 +28,6 @@ go_library(
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -51,7 +49,6 @@ go_test(
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

View File

@@ -6,7 +6,6 @@ import (
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -23,7 +22,7 @@ type SignedBid interface {
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlobKzgCommitments() ([][]byte, error)
Value() primitives.Wei
Value() []byte
Pubkey() []byte
Version() int
IsNil() bool
@@ -126,8 +125,8 @@ func (b builderBid) Version() int {
}
// Value --
func (b builderBid) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
func (b builderBid) Value() []byte {
return b.p.Value
}
// Pubkey --
@@ -166,7 +165,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
}
// BlobKzgCommitments --
@@ -180,8 +179,8 @@ func (b builderBidCapella) Version() int {
}
// Value --
func (b builderBidCapella) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
func (b builderBidCapella) Value() []byte {
return b.p.Value
}
// Pubkey --
@@ -223,8 +222,8 @@ func (b builderBidDeneb) Version() int {
}
// Value --
func (b builderBidDeneb) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
func (b builderBidDeneb) Value() []byte {
return b.p.Value
}
// Pubkey --
@@ -250,7 +249,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
}
// BlobKzgCommitments --

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -13,11 +14,11 @@ import (
"text/template"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -281,68 +282,133 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
return err
}
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
if !sb.IsBlinded() {
return nil, nil, errNotBlinded
}
// massage the proto struct type data into the api response type.
mj, err := structs.SignedBeaconBlockMessageJsoner(sb)
if err != nil {
return nil, nil, errors.Wrap(err, "error generating blinded beacon block post request")
}
body, err := json.Marshal(mj)
if err != nil {
return nil, nil, errors.Wrap(err, "error marshaling blinded block post request to json")
}
postOpts := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(sb.Version()))
r.Header.Set("Content-Type", api.JsonMediaType)
r.Header.Set("Accept", api.JsonMediaType)
}
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
// blobs bundle if it is post deneb.
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
}
// ExecutionPayloadResponse parses just the outer container and the Value key, enabling it to use the .Value
// key to determine which underlying data type to use to finish the unmarshaling.
ep := &ExecutionPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder ExecutionPayloadResponse")
}
if strings.ToLower(ep.Version) != version.String(sb.Version()) {
return nil, nil, errors.Wrapf(errResponseVersionMismatch, "req=%s, recv=%s", strings.ToLower(ep.Version), version.String(sb.Version()))
}
// This parses the rest of the response and returns the inner data field.
pp, err := ep.ParsePayload()
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to parse execution payload from builder with version=%s", ep.Version)
}
// Get the payload as a proto.Message so it can be wrapped as an execution payload interface.
pb, err := pp.PayloadProto()
if err != nil {
return nil, nil, err
}
ed, err := blocks.NewWrappedExecutionData(pb)
if err != nil {
return nil, nil, err
}
bb, ok := pp.(BlobBundler)
if ok {
bbpb, err := bb.BundleProto()
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to extract blobs bundle from builder response with version=%s", ep.Version)
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
return ed, bbpb, nil
b, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(&ethpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayload(p)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(&ethpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Deneb:
psb, err := sb.PbBlindedDenebBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
}
ep := &ExecPayloadResponseDeneb{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
}
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
return nil, nil, errors.New("not a deneb payload")
}
p, blobBundle, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, blobBundle, nil
default:
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
return ed, nil, nil
}
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"net/url"
"strconv"
@@ -15,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -198,12 +198,12 @@ func TestClient_GetHeader(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
require.Equal(t, uint64(1), bidHeader.GasUsed())
// this matches the value in the testExampleHeaderResponse
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
@@ -230,11 +230,12 @@ func TestClient_GetHeader(t *testing.T) {
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
@@ -261,13 +262,12 @@ func TestClient_GetHeader(t *testing.T) {
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
kcgCommitments, err := bid.BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(kcgCommitments) > 0, true)
@@ -432,7 +432,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, _, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorIs(t, err, errResponseVersionMismatch)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})

View File

@@ -9,15 +9,11 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"google.golang.org/protobuf/proto"
)
var errInvalidUint256 = errors.New("invalid Uint256")
@@ -48,9 +44,6 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
func (s Uint256) SSZBytes() []byte {
if s.Int == nil {
s.Int = big.NewInt(0)
}
if !math.IsValidUint256(s.Int) {
return []byte{}
}
@@ -98,9 +91,6 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
// MarshalText returns a text byte representation of Uint256.
func (s Uint256) MarshalText() ([]byte, error) {
if s.Int == nil {
s.Int = big.NewInt(0)
}
if !math.IsValidUint256(s.Int) {
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
}
@@ -156,8 +146,6 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}
return &eth.BuilderBid{
Header: header,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
@@ -277,11 +265,6 @@ func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
return r.Data.ToProto()
}
func (r *ExecutionPayload) PayloadProto() (proto.Message, error) {
pb, err := r.ToProto()
return pb, err
}
// ToProto returns a ExecutionPayload Proto
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
txs := make([][]byte, len(p.Transactions))
@@ -413,51 +396,6 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
}, nil
}
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
pb := ed.Proto()
var data interface{}
var err error
var ver string
switch pbStruct := pb.(type) {
case *v1.ExecutionPayload:
ver = version.String(version.Bellatrix)
data, err = FromProto(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
}
case *v1.ExecutionPayloadCapella:
ver = version.String(version.Capella)
data, err = FromProtoCapella(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
}
case *v1.ExecutionPayloadDeneb:
ver = version.String(version.Deneb)
payloadStruct, err := FromProtoDeneb(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
}
data = &ExecutionPayloadDenebAndBlobsBundle{
ExecutionPayload: &payloadStruct,
BlobsBundle: FromBundleProto(bundle),
}
default:
return nil, errInvalidTypeConversion
}
encoded, err := json.Marshal(data)
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
}
return &ExecutionPayloadResponse{
Version: ver,
Data: encoded,
}, nil
}
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
type ExecHeaderResponseCapella struct {
Data struct {
@@ -486,8 +424,6 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
}
return &eth.BuilderBidCapella{
Header: header,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
@@ -587,42 +523,6 @@ type ExecPayloadResponseCapella struct {
Data ExecutionPayloadCapella `json:"data"`
}
// ExecutionPayloadResponse allows for unmarshaling just the Version field of the payload.
// This allows it to return different ExecutionPayload types based on the version field.
type ExecutionPayloadResponse struct {
Version string `json:"version"`
Data json.RawMessage `json:"data"`
}
// ParsedPayload can retrieve the underlying protobuf message for the given execution payload response.
type ParsedPayload interface {
PayloadProto() (proto.Message, error)
}
// BlobBundler can retrieve the underlying blob bundle protobuf message for the given execution payload response.
type BlobBundler interface {
BundleProto() (*v1.BlobsBundle, error)
}
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
var toProto ParsedPayload
switch r.Version {
case version.String(version.Bellatrix):
toProto = &ExecutionPayload{}
case version.String(version.Capella):
toProto = &ExecutionPayloadCapella{}
case version.String(version.Deneb):
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
default:
return nil, consensusblocks.ErrUnsupportedVersion
}
if err := json.Unmarshal(r.Data, toProto); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal the response .Data field with the stated version schema")
}
return toProto, nil
}
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
type ExecutionPayloadCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
@@ -647,11 +547,6 @@ func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, err
return r.Data.ToProto()
}
func (p *ExecutionPayloadCapella) PayloadProto() (proto.Message, error) {
pb, err := p.ToProto()
return pb, err
}
// ToProto returns a ExecutionPayloadCapella Proto.
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
txs := make([][]byte, len(p.Transactions))
@@ -1026,10 +921,8 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
return &eth.BuilderBidDeneb{
Header: header,
BlobKzgCommitments: kzgCommitments,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
@@ -1235,12 +1128,6 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
if r.Data == nil {
return nil, nil, errors.New("data field in response is empty")
}
if r.Data.ExecutionPayload == nil {
return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
}
if r.Data.BlobsBundle == nil {
return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
}
payload, err := r.Data.ExecutionPayload.ToProto()
if err != nil {
return nil, nil, err
@@ -1252,26 +1139,8 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
return payload, bundle, nil
}
func (r *ExecutionPayloadDenebAndBlobsBundle) PayloadProto() (proto.Message, error) {
if r.ExecutionPayload == nil {
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload in combined deneb payload")
}
pb, err := r.ExecutionPayload.ToProto()
return pb, err
}
func (r *ExecutionPayloadDenebAndBlobsBundle) BundleProto() (*v1.BlobsBundle, error) {
if r.BlobsBundle == nil {
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
}
return r.BlobsBundle.ToProto()
}
// ToProto returns the ExecutionPayloadDeneb Proto.
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
if p == nil {
return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
}
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])

View File

@@ -15,11 +15,9 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/math"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -1602,6 +1600,7 @@ func TestBuilderBidUnmarshalUint256(t *testing.T) {
require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
r := &ExecHeaderResponse{}
require.NoError(t, json.Unmarshal([]byte(testBuilderBid), r))
//require.Equal(t, expectedValue, r.Data.Message.Value)
marshaled := r.Data.Message.Value.String()
require.Equal(t, base10, marshaled)
require.Equal(t, 0, expectedValue.Cmp(r.Data.Message.Value.Int))
@@ -1908,40 +1907,3 @@ func TestErrorMessage_non200Err(t *testing.T) {
})
}
}
func TestEmptyResponseBody(t *testing.T) {
t.Run("empty buffer", func(t *testing.T) {
var b []byte
r := &ExecutionPayloadResponse{}
err := json.Unmarshal(b, r)
_, ok := err.(*json.SyntaxError)
require.Equal(t, true, ok)
})
t.Run("empty object", func(t *testing.T) {
empty := []byte("{}")
emptyResponse := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal(empty, emptyResponse))
_, err := emptyResponse.ParsePayload()
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
})
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
for i := range versions {
vstr := version.String(versions[i])
t.Run("populated version without payload"+vstr, func(t *testing.T) {
in := &ExecutionPayloadResponse{Version: vstr}
encoded, err := json.Marshal(in)
require.NoError(t, err)
epr := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal(encoded, epr))
pp, err := epr.ParsePayload()
require.NoError(t, err)
pb, err := pp.PayloadProto()
if err == nil {
require.NoError(t, err)
require.Equal(t, false, pb == nil)
} else {
require.ErrorIs(t, err, consensusblocks.ErrNilObject)
}
})
}
}

View File

@@ -4,6 +4,4 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
SystemLogsPrefix = "health/logs"
AuthTokenFileName = "auth-token"
)

View File

@@ -14,7 +14,7 @@ go_library(
"//validator:__subpackages__",
],
deps = [
"//api/server/middleware:go_default_library",
"//api/server:go_default_library",
"//runtime:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",

View File

@@ -11,7 +11,7 @@ import (
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
"github.com/prysmaticlabs/prysm/v5/api/server"
"github.com/prysmaticlabs/prysm/v5/runtime"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
@@ -104,7 +104,7 @@ func (g *Gateway) Start() {
}
}
corsMux := middleware.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
if g.cfg.muxHandler != nil {
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

View File

@@ -1,32 +0,0 @@
package api
import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)
// GenerateRandomHexString generates a random hex string that follows the standards for jwt token
// used for beacon node -> execution client
// used for web client -> validator client
func GenerateRandomHexString() (string, error) {
secret := make([]byte, 32)
randGen := rand.NewGenerator()
n, err := randGen.Read(secret)
if err != nil {
return "", err
} else if n != 32 {
return "", errors.New("rand: unexpected length")
}
return hexutil.Encode(secret), nil
}
// ValidateAuthToken validating auth token for web
func ValidateAuthToken(token string) error {
b, err := hexutil.Decode(token)
// token should be hex-encoded and at least 256 bits
if err != nil || len(b) < 32 {
return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
}
return nil
}

View File

@@ -1,13 +0,0 @@
package api
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestGenerateRandomHexString(t *testing.T) {
token, err := GenerateRandomHexString()
require.NoError(t, err)
require.NoError(t, ValidateAuthToken(token))
}

View File

@@ -2,14 +2,29 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["error.go"],
srcs = [
"error.go",
"middleware.go",
"util.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server",
visibility = ["//visibility:public"],
deps = [
"@com_github_gorilla_mux//:go_default_library",
"@com_github_rs_cors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["error_test.go"],
srcs = [
"error_test.go",
"middleware_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

api/server/middleware.go (new file), 32 added lines
View File

@@ -0,0 +1,32 @@
package server
import (
"net/http"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
NormalizeQueryValues(query)
r.URL.RawQuery = query.Encode()
next.ServeHTTP(w, r)
})
}
// CorsHandler sets the cors settings on api endpoints
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
c := cors.New(cors.Options{
AllowedOrigins: allowOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler
}

View File

@@ -1,29 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"middleware.go",
"util.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
visibility = ["//visibility:public"],
deps = [
"@com_github_gorilla_mux//:go_default_library",
"@com_github_rs_cors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"middleware_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,112 +0,0 @@
package middleware
import (
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
NormalizeQueryValues(query)
r.URL.RawQuery = query.Encode()
next.ServeHTTP(w, r)
})
}
// CorsHandler sets the cors settings on api endpoints
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
c := cors.New(cors.Options{
AllowedOrigins: allowOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler
}
// ContentTypeHandler checks request for the appropriate media types otherwise returning a http.StatusUnsupportedMediaType error
func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// skip the GET request
if r.Method == http.MethodGet {
next.ServeHTTP(w, r)
return
}
contentType := r.Header.Get("Content-Type")
if contentType == "" {
http.Error(w, "Content-Type header is missing", http.StatusUnsupportedMediaType)
return
}
accepted := false
for _, acceptedType := range acceptedMediaTypes {
if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
accepted = true
break
}
}
if !accepted {
http.Error(w, fmt.Sprintf("Unsupported media type: %s", contentType), http.StatusUnsupportedMediaType)
return
}
next.ServeHTTP(w, r)
})
}
}
// AcceptHeaderHandler checks if the client's response preference is handled
func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
acceptHeader := r.Header.Get("Accept")
// header is optional and should skip if not provided
if acceptHeader == "" {
next.ServeHTTP(w, r)
return
}
accepted := false
acceptTypes := strings.Split(acceptHeader, ",")
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
for _, acceptType := range acceptTypes {
acceptType = strings.TrimSpace(acceptType)
if acceptType == "*/*" {
accepted = true
break
}
for _, serverAcceptedType := range serverAcceptedTypes {
if strings.HasPrefix(acceptType, serverAcceptedType) {
accepted = true
break
}
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
accepted = true
break
}
}
if accepted {
break
}
}
if !accepted {
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
})
}
}

View File

@@ -1,204 +0,0 @@
package middleware
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := NormalizeQueryValuesHandler(nextHandler)
tests := []struct {
name string
inputQuery string
expectedQuery string
}{
{
name: "3 values",
inputQuery: "key=value1,value2,value3",
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if rr.Code != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
}
if req.URL.RawQuery != test.expectedQuery {
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
}
if rr.Body.String() != "next handler" {
t.Errorf("next handler was not executed")
}
})
}
}
func TestContentTypeHandler(t *testing.T) {
acceptedMediaTypes := []string{api.JsonMediaType, api.OctetStreamMediaType}
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := ContentTypeHandler(acceptedMediaTypes)(nextHandler)
tests := []struct {
name string
contentType string
expectedStatusCode int
isGet bool
}{
{
name: "Accepted Content-Type - application/json",
contentType: api.JsonMediaType,
expectedStatusCode: http.StatusOK,
},
{
name: "Accepted Content-Type - ssz format",
contentType: api.OctetStreamMediaType,
expectedStatusCode: http.StatusOK,
},
{
name: "Unsupported Content-Type - text/plain",
contentType: "text/plain",
expectedStatusCode: http.StatusUnsupportedMediaType,
},
{
name: "Missing Content-Type",
contentType: "",
expectedStatusCode: http.StatusUnsupportedMediaType,
},
{
name: "GET request skips content type check",
contentType: "",
expectedStatusCode: http.StatusOK,
isGet: true,
},
{
name: "Content type contains charset is ok",
contentType: "application/json; charset=utf-8",
expectedStatusCode: http.StatusOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
httpMethod := http.MethodPost
if tt.isGet {
httpMethod = http.MethodGet
}
req := httptest.NewRequest(httpMethod, "/", nil)
if tt.contentType != "" {
req.Header.Set("Content-Type", tt.contentType)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if status := rr.Code; status != tt.expectedStatusCode {
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
}
})
}
}
func TestAcceptHeaderHandler(t *testing.T) {
acceptedTypes := []string{"application/json", "application/octet-stream"}
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := AcceptHeaderHandler(acceptedTypes)(nextHandler)
tests := []struct {
name string
acceptHeader string
expectedStatusCode int
}{
{
name: "Accepted Accept-Type - application/json",
acceptHeader: "application/json",
expectedStatusCode: http.StatusOK,
},
{
name: "Accepted Accept-Type - application/octet-stream",
acceptHeader: "application/octet-stream",
expectedStatusCode: http.StatusOK,
},
{
name: "Accepted Accept-Type with parameters",
acceptHeader: "application/json;q=0.9, application/octet-stream;q=0.8",
expectedStatusCode: http.StatusOK,
},
{
name: "Unsupported Accept-Type - text/plain",
acceptHeader: "text/plain",
expectedStatusCode: http.StatusNotAcceptable,
},
{
name: "Missing Accept header",
acceptHeader: "",
expectedStatusCode: http.StatusOK,
},
{
name: "*/* is accepted",
acceptHeader: "*/*",
expectedStatusCode: http.StatusOK,
},
{
name: "application/* is accepted",
acceptHeader: "application/*",
expectedStatusCode: http.StatusOK,
},
{
name: "/* is unsupported",
acceptHeader: "/*",
expectedStatusCode: http.StatusNotAcceptable,
},
{
name: "application/ is unsupported",
acceptHeader: "application/",
expectedStatusCode: http.StatusNotAcceptable,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
if tt.acceptHeader != "" {
req.Header.Set("Accept", tt.acceptHeader)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if status := rr.Code; status != tt.expectedStatusCode {
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
}
})
}
}

View File

@@ -0,0 +1,54 @@
package server
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := NormalizeQueryValuesHandler(nextHandler)
tests := []struct {
name string
inputQuery string
expectedQuery string
}{
{
name: "3 values",
inputQuery: "key=value1,value2,value3",
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if rr.Code != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
}
if req.URL.RawQuery != test.expectedQuery {
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
}
if rr.Body.String() != "next handler" {
t.Errorf("next handler was not executed")
}
})
}
}

View File

@@ -26,7 +26,6 @@ go_library(
"//api/server:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",

View File

@@ -1,34 +1,10 @@
package structs
import "encoding/json"
// MessageJsoner describes a signed consensus type wrapper that can return the `.Message` field in a json envelope
// encoded as a []byte, for use as a json.RawMessage value when encoding the outer envelope.
type MessageJsoner interface {
MessageRawJson() ([]byte, error)
}
// SignedMessageJsoner embeds MessageJsoner and adds a method to also retrieve the Signature field as a string.
type SignedMessageJsoner interface {
MessageJsoner
SigString() string
}
type SignedBeaconBlock struct {
Message *BeaconBlock `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlock{}
func (s *SignedBeaconBlock) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlock) SigString() string {
return s.Signature
}
type BeaconBlock struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -53,16 +29,6 @@ type SignedBeaconBlockAltair struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockAltair{}
func (s *SignedBeaconBlockAltair) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockAltair) SigString() string {
return s.Signature
}
type BeaconBlockAltair struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -88,16 +54,6 @@ type SignedBeaconBlockBellatrix struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockBellatrix{}
func (s *SignedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockBellatrix) SigString() string {
return s.Signature
}
type BeaconBlockBellatrix struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -124,16 +80,6 @@ type SignedBlindedBeaconBlockBellatrix struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockBellatrix{}
func (s *SignedBlindedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockBellatrix) SigString() string {
return s.Signature
}
type BlindedBeaconBlockBellatrix struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -160,16 +106,6 @@ type SignedBeaconBlockCapella struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockCapella{}
func (s *SignedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockCapella) SigString() string {
return s.Signature
}
type BeaconBlockCapella struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -197,16 +133,6 @@ type SignedBlindedBeaconBlockCapella struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockCapella{}
func (s *SignedBlindedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockCapella) SigString() string {
return s.Signature
}
type BlindedBeaconBlockCapella struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -246,16 +172,6 @@ type SignedBeaconBlockDeneb struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockDeneb{}
func (s *SignedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockDeneb) SigString() string {
return s.Signature
}
type BeaconBlockDeneb struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -292,16 +208,6 @@ type SignedBlindedBeaconBlockDeneb struct {
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockDeneb{}
func (s *SignedBlindedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockDeneb) SigString() string {
return s.Signature
}
type BlindedBeaconBlockBodyDeneb struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`

View File

@@ -6,10 +6,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -17,8 +15,6 @@ import (
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
var ErrUnsupportedConversion = errors.New("Could not determine api struct type to use for value")
func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, error) {
msg, err := h.Message.ToConsensus()
if err != nil {
@@ -1856,34 +1852,7 @@ func BeaconBlockFromConsensus(b *eth.BeaconBlock) *BeaconBlock {
}
}
func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock) (SignedMessageJsoner, error) {
pb, err := block.Proto()
if err != nil {
return nil, err
}
switch pbStruct := pb.(type) {
case *eth.SignedBeaconBlock:
return SignedBeaconBlockPhase0FromConsensus(pbStruct), nil
case *eth.SignedBeaconBlockAltair:
return SignedBeaconBlockAltairFromConsensus(pbStruct), nil
case *eth.SignedBlindedBeaconBlockBellatrix:
return SignedBlindedBeaconBlockBellatrixFromConsensus(pbStruct)
case *eth.SignedBeaconBlockBellatrix:
return SignedBeaconBlockBellatrixFromConsensus(pbStruct)
case *eth.SignedBlindedBeaconBlockCapella:
return SignedBlindedBeaconBlockCapellaFromConsensus(pbStruct)
case *eth.SignedBeaconBlockCapella:
return SignedBeaconBlockCapellaFromConsensus(pbStruct)
case *eth.SignedBlindedBeaconBlockDeneb:
return SignedBlindedBeaconBlockDenebFromConsensus(pbStruct)
case *eth.SignedBeaconBlockDeneb:
return SignedBeaconBlockDenebFromConsensus(pbStruct)
default:
return nil, ErrUnsupportedConversion
}
}
func SignedBeaconBlockPhase0FromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
func SignedBeaconBlockFromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
return &SignedBeaconBlock{
Message: BeaconBlockFromConsensus(b.Block),
Signature: hexutil.Encode(b.Signature),

View File

@@ -1,4 +1,4 @@
package middleware
package server
import (
"net/url"

View File

@@ -1,4 +1,4 @@
package middleware
package server
import (
"testing"

View File

@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
func TestToggleMultipleTimes(t *testing.T) {
func TestToogleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToggleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
func TestToggleAfterOverflow(t *testing.T) {
func TestToogleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}

View File

@@ -20,7 +20,6 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)
@@ -220,9 +219,12 @@ type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
}
// delete removes the given case from cs.

View File

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)

View File

@@ -137,7 +137,7 @@ go_test(
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -398,6 +399,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -509,6 +518,13 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t

View File

@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
@@ -325,7 +325,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Deneb:
withdrawals, _, err := st.ExpectedWithdrawals()
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
@@ -342,7 +342,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
case version.Capella:
withdrawals, _, err := st.ExpectedWithdrawals()
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri

View File

@@ -401,7 +401,7 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// if the attestation is one epoch older, it wouldn't be useful to save it.
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {

View File

@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}

View File

@@ -312,14 +312,14 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []ethpb.Att{
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].GetData().Slot > atts[j].GetData().Slot
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}
@@ -389,14 +389,14 @@ func TestSaveOrphanedOps(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []ethpb.Att{
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].GetData().Slot > atts[j].GetData().Slot
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
@@ -517,14 +517,14 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []ethpb.Att{
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].GetData().Slot > atts[j].GetData().Slot
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}

View File

@@ -369,6 +369,6 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
for _, att := range blk.Body().Attestations() {
attestationInclusionDelay.Observe(float64(blk.Slot() - att.GetData().Slot))
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
}
}

View File

@@ -36,17 +36,17 @@ import (
//
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time.Duration) error {
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
if err := helpers.ValidateNilAttestation(a); err != nil {
return err
}
if err := helpers.ValidateSlotTargetEpoch(a.GetData()); err != nil {
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
return err
}
tgt := ethpb.CopyCheckpoint(a.GetData().Target)
tgt := ethpb.CopyCheckpoint(a.Data.Target)
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go
@@ -67,7 +67,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
}
// Verify attestation beacon block is known and not from the future.
if err := s.verifyBeaconBlock(ctx, a.GetData()); err != nil {
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
return errors.Wrap(err, "could not verify attestation beacon block")
}
@@ -75,16 +75,16 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// validate_aggregate_proof.go and validate_beacon_attestation.go
// Verify attestations can only affect the fork choice of subsequent slots.
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
return err
}
// Use the target state to verify attesting indices are valid.
committees, err := helpers.AttestationCommittees(ctx, baseState, a)
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committees...)
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
if err != nil {
return err
}
@@ -97,7 +97,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// We assume the trusted attestation in this function has a verified signature.
// Update forkchoice store with the new attestation for updating weight.
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
return nil
}

View File

@@ -7,11 +7,9 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -75,7 +73,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
tests := []struct {
name string
a ethpb.Att
a *ethpb.Attestation
wantedErr string
}{
{
@@ -127,36 +125,25 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
eval := func(ctx context.Context, service *Service, genesisState state.BeaconState, pks []bls.SecretKey) {
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].GetData().Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
service, tr := minimalTestService(t)
ctx := tr.ctx
t.Run("pre-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
genesisState, pks := util.DeterministicGenesisState(t, 64)
eval(ctx, service, genesisState, pks)
})
t.Run("post-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
genesisState, pks := util.DeterministicGenesisStateElectra(t, 64)
eval(ctx, service, genesisState, pks)
})
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetRecentPreState(t *testing.T) {

View File

@@ -366,17 +366,17 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committees, err := helpers.AttestationCommittees(ctx, st, a)
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indices, err := attestation.AttestingIndices(a, committees...)
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}
@@ -387,7 +387,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it is the caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []ethpb.AttSlashing) {
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
for _, index := range indices {

View File

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Forkchoice.
// Foprkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {

View File

@@ -824,10 +824,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
atts := make([]ethpb.Att, len(b.Block.Body.Attestations))
for i, a := range b.Block.Body.Attestations {
atts[i] = a
}
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -1963,130 +1960,68 @@ func TestNoViableHead_Reboot(t *testing.T) {
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
t.Run("pre-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
})
t.Run("post-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisStateElectra(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
genesis, err := blocks.NewGenesisBlockForState(ctx, st)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, genesis), "Could not save genesis block")
parentRoot, err := genesis.Block().HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
})
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {

View File

@@ -31,7 +31,7 @@ type AttestationStateFetcher interface {
// AttestationReceiver interface defines the methods of the chain service for receiving and processing new attestations.
type AttestationReceiver interface {
AttestationStateFetcher
VerifyLmdFfgConsistency(ctx context.Context, att ethpb.Att) error
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
InForkchoice([32]byte) bool
}
@@ -51,13 +51,13 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
}
// VerifyLmdFfgConsistency verifies that the attestation's LMD and FFG votes are consistent with each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) error {
r, err := s.TargetRootForEpoch([32]byte(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
r, err := s.TargetRootForEpoch([32]byte(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
if err != nil {
return err
}
if !bytes.Equal(a.GetData().Target.Root, r[:]) {
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.GetData().BeaconBlockRoot, a.GetData().Target.Root, r)
if !bytes.Equal(a.Data.Target.Root, r[:]) {
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
}
return nil
}
@@ -170,13 +170,13 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.GetData().Slot + 1
nextSlot := a.Data.Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
continue
}
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}
@@ -185,17 +185,17 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
if !helpers.VerifyCheckpointEpoch(a.GetData().Target, s.genesisTime) {
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
continue
}
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
log.WithFields(logrus.Fields{
"slot": a.GetData().Slot,
"committeeIndex": a.GetData().CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregationCount": a.GetAggregationBits().Count(),
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
"aggregationCount": a.AggregationBits.Count(),
}).WithError(err).Warn("Could not process attestation for fork choice")
}
}
@@ -206,7 +206,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att ethpb.Att, disparity time.Duration) error {
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
defer span.End()

View File

@@ -73,7 +73,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].GetData().Target.Root)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
@@ -83,11 +83,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
attsToSave := make([]ethpb.Att, len(atts))
for i, a := range atts {
attsToSave[i] = a
}
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx, 0)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
@@ -125,14 +121,10 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
// Generate attestations for this block in Slot 1
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
attsToSave := make([]ethpb.Att, len(atts))
for i, a := range atts {
attsToSave[i] = a
}
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
// Verify the target is in forkchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
// Insert a new block to forkchoice

View File

@@ -52,7 +52,7 @@ type BlobReceiver interface {
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
}
// ReceiveBlock is a function that defines the operations (minus pubsub)
@@ -170,7 +170,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, postState)
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -295,10 +295,10 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing) {
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []ethpb.AttSlashing{slashing})
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}
// prunePostBlockOperationPools only runs on a new head, otherwise it returns nil.
@@ -443,7 +443,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called in the background
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -451,17 +451,8 @@ func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.Bea
}
s.headLock.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
@@ -479,12 +470,12 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committees, err := helpers.AttestationCommittees(ctx, preState, att)
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committees")
log.WithError(err).Error("Could not get attestation committee")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
@@ -497,10 +488,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {

View File

@@ -8,14 +8,12 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -380,38 +378,3 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))
s.sendNewFinalizedEvent(s.ctx, st)
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}

View File

@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -80,7 +80,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
fc := doublylinkedtree.New()

View File

@@ -8,12 +8,11 @@ import (
"github.com/prysmaticlabs/prysm/v5/async/event"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
@@ -50,7 +49,7 @@ func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}
@@ -80,7 +79,7 @@ type testServiceRequirements struct {
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositsnapshot.Cache
dc *depositcache.DepositCache
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
@@ -95,7 +94,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositsnapshot.New()
dc, err := depositcache.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,
@@ -121,7 +120,6 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithSyncChecker(mock.MockChecker{}),
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

View File

@@ -414,8 +414,8 @@ func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
}
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a ethpb.Att) error {
if !bytes.Equal(a.GetData().BeaconBlockRoot, a.GetData().Target.Root) {
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
return errors.New("LMD and FFG miss matched")
}
return nil
@@ -495,7 +495,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
}
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (*ChainService) ReceiveAttesterSlashing(context.Context, ethpb.AttSlashing) {}
func (*ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
// IsFinalized mocks the same method in the chain service.
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {

View File

@@ -2,6 +2,7 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client/builder"
@@ -54,13 +55,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella)
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}

View File

@@ -6,7 +6,6 @@ go_library(
"active_balance.go",
"active_balance_disabled.go", # keep
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
"committee.go",
"committee_disabled.go", # keep
@@ -71,7 +70,6 @@ go_test(
"committee_fuzz_test.go",
"committee_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
"registration_test.go",
"skip_slot_cache_test.go",
@@ -95,7 +93,6 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

View File

@@ -3,13 +3,17 @@
package cache
import (
"encoding/binary"
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
const (
@@ -82,3 +86,36 @@ func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
balanceCacheHit.Inc()
return value.(uint64), nil
}
// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
if err != nil {
// impossible condition due to early division
return "", errors.Errorf("start slot calculation overflows: %v", err)
}
prevSlot := primitives.Slot(0)
if epochStartSlot > 1 {
prevSlot = epochStartSlot - 1
}
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
// impossible condition because index is always constrained within state
return "", err
}
// Mix in current epoch
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
key := append(r, b...)
// Mix in validator count
b = make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
key = append(key, b...)
return string(key), nil
}
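
A minimal standalone sketch of the key layout described in the comment above (a 32-byte block root followed by the epoch and the validator count as little-endian uint64s); the helper name balanceKeyFromParts and its package are hypothetical illustrations, not part of this change:

package main

import (
	"encoding/binary"
	"fmt"
)

// balanceKeyFromParts mirrors the layout built by balanceCacheKey above:
// a 32-byte block root followed by the epoch and the validator count,
// each encoded as a little-endian uint64. Illustrative only.
func balanceKeyFromParts(root [32]byte, epoch, validatorCount uint64) string {
	key := make([]byte, 0, 32+8+8)
	key = append(key, root[:]...)

	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, epoch)
	key = append(key, b...)

	binary.LittleEndian.PutUint64(b, validatorCount)
	key = append(key, b...)

	return string(key)
}

func main() {
	var root [32]byte
	key := balanceKeyFromParts(root, 10, 64)
	fmt.Printf("key length: %d bytes\n", len(key)) // 48 = 32 + 8 + 8
}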

View File

@@ -1,13 +1,12 @@
//go:build !fuzz
package cache_test
package cache
import (
"encoding/binary"
"math"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -28,33 +27,33 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
st, err := state_native.InitializeFromProtoPhase0(raw)
require.NoError(t, err)
cc := cache.NewEffectiveBalanceCache()
_, err = cc.Get(st)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
cache := NewEffectiveBalanceCache()
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
b := uint64(100)
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
cachedB, err := cc.Get(st)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err := cache.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
require.NoError(t, st.SetSlot(1000))
_, err = cc.Get(st)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
b = uint64(200)
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
cachedB, err = cc.Get(st)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err = cache.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
_, err = cc.Get(st)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
b = uint64(300)
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
cachedB, err = cc.Get(st)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err = cache.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
}
@@ -73,6 +72,6 @@ func TestBalanceCache_BalanceKey(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(primitives.Slot(math.MaxUint64)))
_, err = cache.BalanceCacheKey(st)
_, err = balanceCacheKey(st)
require.NoError(t, err)
}

View File

@@ -1,43 +0,0 @@
package cache
import (
"encoding/binary"
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
if err != nil {
// impossible condition due to early division
return "", fmt.Errorf("start slot calculation overflows: %w", err)
}
prevSlot := primitives.Slot(0)
if epochStartSlot > 1 {
prevSlot = epochStartSlot - 1
}
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
// impossible condition because index is always constrained within state
return "", err
}
// Mix in current epoch
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
key := append(r, b...)
// Mix in validator count
b = make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
key = append(key, b...)
return string(key), nil
}

View File

@@ -1,9 +1,8 @@
package cache_test
package cache
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -16,7 +15,7 @@ import (
)
func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
cache := cache.NewCheckpointStateCache()
cache := NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -59,16 +58,16 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
}
func TestCheckpointStateCache_MaxSize(t *testing.T) {
c := cache.NewCheckpointStateCache()
c := NewCheckpointStateCache()
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0,
})
require.NoError(t, err)
for i := uint64(0); i < uint64(cache.MaxCheckpointStateSize()+100); i++ {
for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
require.NoError(t, st.SetSlot(primitives.Slot(i)))
require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: primitives.Epoch(i), Root: make([]byte, 32)}, st))
}
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
}

View File

@@ -0,0 +1,50 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"deposits_cache.go",
"log.go",
"pending_deposits.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"deposits_cache_test.go",
"pending_deposits_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -0,0 +1,327 @@
// Package depositcache is the source of validator deposits maintained
// in-memory by the beacon node. Deposits processed from the
// eth1 powchain are then stored in this cache to be accessed by
// any other service during a beacon node's runtime.
package depositcache
import (
"context"
"encoding/hex"
"math/big"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/container/trie"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacondb_all_deposits",
Help: "The number of total deposits in the beaconDB in-memory database",
})
)
// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
deposits *trie.SparseMerkleTrie
merkleTrieIndex int64
}
// DepositCache stores all in-memory deposit objects. This
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*ethpb.DepositContainer
deposits []*ethpb.DepositContainer
finalizedDeposits FinalizedDeposits
depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
depositsLock sync.RWMutex
}
// New instantiates a new deposit cache
func New() (*DepositCache, error) {
finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// Inserting the first item into the trie will set the value of the index to 0.
return &DepositCache{
pendingDeposits: []*ethpb.DepositContainer{},
deposits: []*ethpb.DepositContainer{},
depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
}, nil
}
// InsertDeposit inserts a deposit into the cache. If the deposit is nil,
// an error is returned and nothing is inserted.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if int(index) != len(dc.deposits) {
return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
}
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
depCtr := &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
newDeposits := append(
[]*ethpb.DepositContainer{depCtr},
dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
// Append the deposit to our map. In the event no deposits
// exist for the pubkey, it is simply added to the map.
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
historicalDepositsCount.Inc()
return nil
}
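
Editorial note (not part of the diff): a minimal sketch of the strict index contract enforced above, assuming the imports already listed at the top of this file and a caller-supplied context; the helper name is illustrative only.

func exampleSequentialInsert(ctx context.Context) error {
	dc, err := New()
	if err != nil {
		return err
	}
	dep := &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: make([]byte, fieldparams.BLSPubkeyLength)}}
	// The index must equal the current number of cached deposits (0 for an
	// empty cache); any other value is rejected with the "wanted deposit
	// with index ..." error returned by InsertDeposit.
	if err := dc.InsertDeposit(ctx, dep, 100 /*eth1 block*/, 0 /*index*/, [32]byte{}); err != nil {
		return err
	}
	// The next insertion must use index 1, and so on.
	return nil
}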
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
dc.deposits = ctrs
for _, c := range ctrs {
// Use a new value, as the reference
// of c changes in the next iteration.
newPtr := c
pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
}
historicalDepositsCount.Add(float64(len(ctrs)))
}
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
eth1DepositIndex int64, _ common.Hash, _ uint64) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
depositTrie := dc.finalizedDeposits.Deposits()
insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return nil
}
// In the event we have fewer deposits than we need to
// finalize, we finalize up to the last index we do have.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return nil
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
continue
}
if d.Index > eth1DepositIndex {
break
}
depHash, err := d.Deposit.Data.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash deposit data")
}
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
return errors.Wrap(err, "could not insert deposit hash")
}
insertIndex++
}
tree, ok := depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return errors.New("not a sparse merkle tree")
}
dc.finalizedDeposits = FinalizedDeposits{
deposits: tree,
merkleTrieIndex: eth1DepositIndex,
}
return nil
}
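
Editorial note (not part of the diff): a sketch of the finalization flow, assuming deposits were already inserted as above and that the cache.FinalizedDeposits interface exposes MerkleTrieIndex as the concrete type here does; the helper name is hypothetical.

func exampleFinalize(ctx context.Context, dc *DepositCache) error {
	// Finalize every cached deposit up to and including index 2. The call
	// clamps to the last available index if fewer deposits exist, and is a
	// no-op if that index was already finalized.
	if err := dc.InsertFinalizedDeposits(ctx, 2, common.Hash{}, 0); err != nil {
		return err
	}
	fd, err := dc.FinalizedDeposits(ctx)
	if err != nil {
		return err
	}
	_ = fd.MerkleTrieIndex() // last finalized index, or -1 if nothing has been finalized
	return nil
}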
// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
// Make a shallow copy of the deposits and return that. This way, the
// caller can safely iterate over the returned list of deposits without
// the possibility of new deposits showing up. If we were to return the
// list without a copy, when a new deposit is added to the cache, it
// would also be present in the returned value. This could result in a
// race condition if the list is being iterated over.
//
// It's not necessary to make a deep copy of this list because the
// deposits in the cache should never be modified. It is still possible
// for the caller to modify one of the underlying deposits and modify
// the cache, but that's not a race condition. Also, a deep copy would
// take too long and use too much memory.
deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
copy(deposits, dc.deposits)
return deposits
}
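
Editorial note (not part of the diff): a short sketch of what the shallow copy above buys the caller; the helper name is illustrative.

func exampleSnapshot(ctx context.Context, dc *DepositCache) {
	snapshot := dc.AllDepositContainers(ctx)
	// Deposits inserted into the cache after this point never show up in
	// snapshot, so it can be iterated without racing new insertions.
	for _, ctr := range snapshot {
		_ = ctr.Index
	}
}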
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return dc.allDeposits(untilBlk)
}
func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
var deposits []*ethpb.Deposit
for _, ctnr := range dc.deposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
deposits = append(deposits, ctnr.Deposit)
}
}
return deposits
}
// DepositsNumberAndRootAtHeight returns the number of deposits made up to the given block height and the
// root that corresponds to the latest deposit at that height.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
// Send the deposit root of the empty trie if the eth1 follow distance is greater than the time of the earliest
// deposit.
if heightIdx == 0 {
return 0, [32]byte{}
}
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
}
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var deposit *ethpb.Deposit
var blockNum *big.Int
deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
if !ok || len(deps) == 0 {
return deposit, blockNum
}
// We always return the first deposit if a particular
// validator key has multiple deposits assigned to
// it.
deposit = deps[0].Deposit
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
return deposit, blockNum
}
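
Editorial note (not part of the diff): a sketch of the lookup behavior documented above, with a hypothetical helper name.

func exampleLookup(ctx context.Context, dc *DepositCache, pubKey []byte) {
	dep, blockNum := dc.DepositByPubkey(ctx, pubKey)
	if dep == nil {
		return // no deposit recorded for this public key
	}
	// When a key has several deposits, this is always the first one inserted.
	_ = blockNum // eth1 block height at which that first deposit was seen
}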
// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return &FinalizedDeposits{
deposits: dc.finalizedDeposits.deposits.Copy(),
merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
}, nil
}
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
if dc.finalizedDeposits.Deposits() == nil {
return dc.allDeposits(untilBlk)
}
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
return deposits
}
// PruneProofs removes proofs from all deposits whose index is less than or equal to untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if untilDepositIndex >= int64(len(dc.deposits)) {
untilDepositIndex = int64(len(dc.deposits) - 1)
}
for i := untilDepositIndex; i >= 0; i-- {
// Finding a nil proof means that all proofs up to this deposit have already been pruned.
if dc.deposits[i].Deposit.Proof == nil {
break
}
dc.deposits[i].Deposit.Proof = nil
}
return nil
}
// Deposits returns the cached internal deposit tree.
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
if fd.deposits != nil {
return fd.deposits
}
return nil
}
// MerkleTrieIndex represents the last finalized index in
// the finalized deposit container.
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
return fd.merkleTrieIndex
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,5 @@
package depositcache
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "depositcache")

View File

@@ -0,0 +1,151 @@
package depositcache
import (
"context"
"math/big"
"sort"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacondb_pending_deposits",
Help: "The number of pending deposits in the beaconDB in-memory database",
})
)
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}
// InsertPendingDeposit inserts a pending deposit into the cache. If the
// deposit is nil then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
}).Debug("Ignoring nil deposit insertion")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
dc.pendingDeposits = append(dc.pendingDeposits,
&ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
pendingDepositsCount.Inc()
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
depositCntrs := dc.PendingContainers(ctx, untilBlk)
deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
for _, dep := range depositCntrs {
deposits = append(deposits, dep.Deposit)
}
return deposits
}
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
for _, ctnr := range dc.pendingDeposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
// Sort the deposits by Merkle index.
sort.SliceStable(depositCntrs, func(i, j int) bool {
return depositCntrs[i].Index < depositCntrs[j].Index
})
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
return depositCntrs
}
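
Editorial note (not part of the diff): a sketch of retrieving pending containers, assuming the math/big import above; the helper name is illustrative.

func examplePending(ctx context.Context, dc *DepositCache) {
	// A nil block number means no upper bound: every pending container is
	// returned, sorted by Merkle index.
	all := dc.PendingContainers(ctx, nil)
	// Bounding by an eth1 block height keeps only containers seen at or
	// below that height.
	upToBlock100 := dc.PendingContainers(ctx, big.NewInt(100))
	_, _ = all, upToBlock100
}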
// RemovePendingDeposit removes a pending deposit from the cache by matching
// its hash. This method does nothing if the deposit pointer is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
defer span.End()
if d == nil {
log.Debug("Ignoring nil deposit removal")
return
}
depRoot, err := hash.Proto(d)
if err != nil {
log.WithError(err).Error("Could not remove deposit")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
idx := -1
for i, ctnr := range dc.pendingDeposits {
h, err := hash.Proto(ctnr.Deposit)
if err != nil {
log.WithError(err).Error("Could not hash deposit")
continue
}
if h == depRoot {
idx = i
break
}
}
if idx >= 0 {
dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
pendingDepositsCount.Dec()
}
}
// PrunePendingDeposits removes any pending deposit whose merkle tree index is lower than the given index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
defer span.End()
if merkleTreeIndex == 0 {
log.Debug("Ignoring 0 deposit removal")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
for _, dp := range dc.pendingDeposits {
if dp.Index >= merkleTreeIndex {
cleanDeposits = append(cleanDeposits, dp)
}
}
dc.pendingDeposits = cleanDeposits
pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
}

View File

@@ -1,32 +1,82 @@
package depositsnapshot
package depositcache
import (
"context"
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"google.golang.org/protobuf/proto"
)
var _ PendingDepositsFetcher = (*Cache)(nil)
var _ PendingDepositsFetcher = (*DepositCache)(nil)
func TestInsertPendingDeposit_OK(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
}
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
assert.Equal(t, 0, len(dc.pendingDeposits))
}
func TestRemovePendingDeposit_OK(t *testing.T) {
db := DepositCache{}
proof1 := makeDepositProof()
proof1[0] = bytesutil.PadTo([]byte{'A'}, 32)
proof2 := makeDepositProof()
proof2[0] = bytesutil.PadTo([]byte{'A'}, 32)
data := &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
Signature: make([]byte, 96),
}
depToRemove := &ethpb.Deposit{Proof: proof1, Data: data}
otherDep := &ethpb.Deposit{Proof: proof2, Data: data}
db.pendingDeposits = []*ethpb.DepositContainer{
{Deposit: depToRemove, Index: 1},
{Deposit: otherDep, Index: 5},
}
db.RemovePendingDeposit(context.Background(), depToRemove)
if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
t.Error("Failed to remove deposit")
}
}
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit unexpectedly removed")
}
func TestPendingDeposit_RoundTrip(t *testing.T) {
dc := DepositCache{}
proof := makeDepositProof()
proof[0] = bytesutil.PadTo([]byte{'A'}, 32)
data := &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
Signature: make([]byte, 96),
}
dep := &ethpb.Deposit{Proof: proof, Data: data}
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
dc.RemovePendingDeposit(context.Background(), dep)
assert.Equal(t, 0, len(dc.pendingDeposits), "Failed to insert & delete a pending deposit")
}
func TestPendingDeposits_OK(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
@@ -46,7 +96,7 @@ func TestPendingDeposits_OK(t *testing.T) {
}
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
@@ -70,7 +120,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
}
func TestPrunePendingDeposits_OK(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},

View File

@@ -34,7 +34,6 @@ go_test(
name = "go_default_test",
srcs = [
"deposit_cache_test.go",
"deposit_fetcher_test.go",
"deposit_tree_snapshot_test.go",
"merkle_tree_test.go",
"spec_test.go",

View File

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
// Perform this in a non-sensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)

View File

@@ -262,12 +262,6 @@ func toFinalizedDepositsContainer(deposits *DepositTree, index int64) finalizedD
}
}
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.

View File

@@ -99,23 +99,11 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
if d.depositCount <= 0 {
return [32]byte{}, nil, ErrInvalidDepositCount
}
if index >= d.depositCount {
return [32]byte{}, nil, ErrInvalidIndex
}
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
finalizedIdx := -1
if finalizedDeposits != 0 {
fd, err := math.Int(finalizedDeposits)
if err != nil {
return [32]byte{}, nil, err
}
finalizedIdx = fd - 1
finalizedDeposits = finalizedDeposits - 1
}
i, err := math.Int(index)
if err != nil {
return [32]byte{}, nil, err
}
if finalizedDeposits > 0 && i <= finalizedIdx {
if index <= finalizedDeposits {
return [32]byte{}, nil, ErrInvalidIndex
}
leaf, proof := generateProof(d.tree, index, DepositContractDepth)

View File

@@ -1,18 +0,0 @@
package cache
import (
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)
func BalanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
return balanceCacheKey(st)
}
func MaxCheckpointStateSize() int {
return maxCheckpointStateSize
}
func (c *CheckpointStateCache) Cache() *lru.Cache {
return c.cache
}

View File

@@ -1,9 +1,8 @@
package cache_test
package cache
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -126,7 +125,7 @@ func TestSyncCommitteeHeadState(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := cache.NewSyncCommitteeHeadState()
c := NewSyncCommitteeHeadState()
if tt.put != nil {
err := c.Put(tt.put.slot, tt.put.state)
if (err != nil) != tt.wantPutErr {

View File

@@ -48,7 +48,7 @@ func ProcessAttestationsNoVerifySignature(
func ProcessAttestationNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
att ethpb.Att,
att *ethpb.Attestation,
totalBalance uint64,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
@@ -58,24 +58,24 @@ func ProcessAttestationNoVerifySignature(
return nil, err
}
delay, err := beaconState.Slot().SafeSubSlot(att.GetData().Slot)
delay, err := beaconState.Slot().SafeSubSlot(att.Data.Slot)
if err != nil {
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.GetData().Slot, beaconState.Slot())
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.Data.Slot, beaconState.Slot())
}
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.GetData(), delay)
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.Data, delay)
if err != nil {
return nil, err
}
committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indices, err := attestation.AttestingIndices(att, committees...)
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, err
}
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
return SetParticipationAndRewardProposer(ctx, beaconState, att.Data.Target.Epoch, indices, participatedFlags, totalBalance)
}
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards

View File

@@ -195,95 +195,47 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
}
func TestProcessAttestations_OK(t *testing.T) {
t.Run("pre-Electra", func(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
})
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
block := util.NewBeaconBlockAltair()
block.Block.Body.Attestations = []*ethpb.Attestation{att}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
require.NoError(t, err)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
})
t.Run("post-Electra", func(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
})
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
block := util.NewBeaconBlockAltair()
block.Block.Body.Attestations = []*ethpb.Attestation{att}
block := util.NewBeaconBlockElectra()
block.Block.Body.Attestations = []*ethpb.AttestationElectra{att}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
require.NoError(t, err)
})
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
require.NoError(t, err)
}
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
@@ -321,7 +273,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(att, committee)
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
for _, index := range indices {
has, err := altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex)

View File

@@ -28,87 +28,87 @@ import (
// process_historical_roots_update(state)
// process_participation_flag_updates(state) # [New in Altair]
// process_sync_committee_updates(state) # [New in Altair]
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
defer span.End()
if state == nil || state.IsNil() {
return errors.New("nil state")
return nil, errors.New("nil state")
}
vp, bp, err := InitializePrecomputeValidators(ctx, state)
if err != nil {
return err
return nil, err
}
// New in Altair.
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
if err != nil {
return err
return nil, err
}
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
if err != nil {
return errors.Wrap(err, "could not process justification")
return nil, errors.Wrap(err, "could not process justification")
}
// New in Altair.
state, vp, err = ProcessInactivityScores(ctx, state, vp)
if err != nil {
return errors.Wrap(err, "could not process inactivity updates")
return nil, errors.Wrap(err, "could not process inactivity updates")
}
// New in Altair.
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
if err != nil {
return errors.Wrap(err, "could not process rewards and penalties")
return nil, errors.Wrap(err, "could not process rewards and penalties")
}
state, err = e.ProcessRegistryUpdates(ctx, state)
if err != nil {
return errors.Wrap(err, "could not process registry updates")
return nil, errors.Wrap(err, "could not process registry updates")
}
// Modified in Altair and Bellatrix.
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
if err != nil {
return err
return nil, err
}
state, err = e.ProcessSlashings(state, proportionalSlashingMultiplier)
if err != nil {
return err
return nil, err
}
state, err = e.ProcessEth1DataReset(state)
if err != nil {
return err
return nil, err
}
state, err = e.ProcessEffectiveBalanceUpdates(state)
if err != nil {
return err
return nil, err
}
state, err = e.ProcessSlashingsReset(state)
if err != nil {
return err
return nil, err
}
state, err = e.ProcessRandaoMixesReset(state)
if err != nil {
return err
return nil, err
}
state, err = e.ProcessHistoricalDataUpdate(state)
if err != nil {
return err
return nil, err
}
// New in Altair.
state, err = ProcessParticipationFlagUpdates(state)
if err != nil {
return err
return nil, err
}
// New in Altair.
_, err = ProcessSyncCommitteeUpdates(ctx, state)
state, err = ProcessSyncCommitteeUpdates(ctx, state)
if err != nil {
return err
return nil, err
}
return nil
return state, nil
}

View File

@@ -13,9 +13,9 @@ import (
func TestProcessEpoch_CanProcess(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
err := altair.ProcessEpoch(context.Background(), st)
newState, err := altair.ProcessEpoch(context.Background(), st)
require.NoError(t, err)
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
b := st.Balances()
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
@@ -45,9 +45,9 @@ func TestProcessEpoch_CanProcess(t *testing.T) {
func TestProcessEpoch_CanProcessBellatrix(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
err := altair.ProcessEpoch(context.Background(), st)
newState, err := altair.ProcessEpoch(context.Background(), st)
require.NoError(t, err)
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
b := st.Balances()
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))

View File

@@ -154,11 +154,11 @@ func TranslateParticipation(ctx context.Context, state state.BeaconState, atts [
if err != nil {
return nil, err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indices, err := attestation.AttestingIndices(att, committee)
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, err
}

View File

@@ -55,7 +55,7 @@ func TestTranslateParticipation(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(ctx, s, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(pendingAtts[0], committee)
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
require.NoError(t, err)
for _, index := range indices {
has, err := altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex)

View File

@@ -46,7 +46,7 @@ func ProcessAttestationsNoVerifySignature(
func VerifyAttestationNoVerifySignature(
ctx context.Context,
beaconState state.ReadOnlyBeaconState,
att ethpb.Att,
att *ethpb.Attestation,
) error {
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationNoVerifySignature")
defer span.End()
@@ -56,7 +56,7 @@ func VerifyAttestationNoVerifySignature(
}
currEpoch := time.CurrentEpoch(beaconState)
prevEpoch := time.PrevEpoch(beaconState)
data := att.GetData()
data := att.Data
if data.Target.Epoch != prevEpoch && data.Target.Epoch != currEpoch {
return fmt.Errorf(
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
@@ -76,11 +76,11 @@ func VerifyAttestationNoVerifySignature(
}
}
if err := helpers.ValidateSlotTargetEpoch(att.GetData()); err != nil {
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
return err
}
s := att.GetData().Slot
s := att.Data.Slot
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
if !minInclusionCheck {
return fmt.Errorf(
@@ -102,58 +102,27 @@ func VerifyAttestationNoVerifySignature(
)
}
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.GetData().Target.Epoch)
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.Data.Target.Epoch)
if err != nil {
return err
}
c := helpers.SlotCommitteeCount(activeValidatorCount)
if uint64(att.Data.CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.Data.CommitteeIndex, c)
}
var indexedAtt ethpb.IndexedAtt
if err := helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
return errors.Wrap(err, "could not verify attestation bitfields")
}
if att.Version() < version.Electra {
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
return errors.Wrap(err, "could not verify attestation bitfields")
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
} else {
if att.GetData().CommitteeIndex != 0 {
return errors.New("committee index must be 0 post-Electra")
}
committeeIndices := att.CommitteeBitsVal().BitIndices()
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
participantsCount := 0
var err error
for i, ci := range committeeIndices {
if uint64(ci) >= c {
return fmt.Errorf("committee index %d >= committee count %d", ci, c)
}
committees[i], err = helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, primitives.CommitteeIndex(ci))
if err != nil {
return err
}
participantsCount += len(committees[i])
}
if att.GetAggregationBits().Len() != uint64(participantsCount) {
return fmt.Errorf("aggregation bits count %d is different than participant count %d", att.GetAggregationBits().Len(), participantsCount)
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committees...)
if err != nil {
return err
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
@@ -164,7 +133,7 @@ func VerifyAttestationNoVerifySignature(
func ProcessAttestationNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
att ethpb.Att,
att *ethpb.Attestation,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
defer span.End()
@@ -174,15 +143,15 @@ func ProcessAttestationNoVerifySignature(
}
currEpoch := time.CurrentEpoch(beaconState)
data := att.GetData()
s := att.GetData().Slot
data := att.Data
s := att.Data.Slot
proposerIndex, err := helpers.BeaconProposerIndex(ctx, beaconState)
if err != nil {
return nil, err
}
pendingAtt := &ethpb.PendingAttestation{
Data: data,
AggregationBits: att.GetAggregationBits(),
AggregationBits: att.AggregationBits,
InclusionDelay: beaconState.Slot() - s,
ProposerIndex: proposerIndex,
}
@@ -200,6 +169,23 @@ func ProcessAttestationNoVerifySignature(
return beaconState, nil
}
// VerifyAttestationSignature converts an attestation into an indexed attestation and verifies
// the signature in that attestation.
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att *ethpb.Attestation) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
}
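
Editorial note (not part of the diff): a usage sketch for the restored helper, written from the perspective of a caller outside this package; the wrapper name is hypothetical and the state and attestation are assumed to be supplied by the caller.

func checkAttestationSignature(ctx context.Context, st state.ReadOnlyBeaconState, att *ethpb.Attestation) error {
	// Converts att to its indexed form using the committee for
	// att.Data.Slot and att.Data.CommitteeIndex, then verifies the
	// aggregate BLS signature against the state's fork and genesis root.
	return blocks.VerifyAttestationSignature(ctx, st, att)
}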
// VerifyIndexedAttestation determines the validity of an indexed attestation.
//
// Spec pseudocode definition:
@@ -217,7 +203,7 @@ func ProcessAttestationNoVerifySignature(
// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
// signing_root = compute_signing_root(indexed_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt ethpb.IndexedAtt) error {
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
defer span.End()
@@ -226,14 +212,14 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
}
domain, err := signing.Domain(
beaconState.Fork(),
indexedAtt.GetData().Target.Epoch,
indexedAtt.Data.Target.Epoch,
params.BeaconConfig().DomainBeaconAttester,
beaconState.GenesisValidatorsRoot(),
)
if err != nil {
return err
}
indices := indexedAtt.GetAttestingIndices()
indices := indexedAtt.AttestingIndices
var pubkeys []bls.PublicKey
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))

View File

@@ -44,7 +44,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices1, err := attestation.AttestingIndices(att1, committee)
attestingIndices1, err := attestation.AttestingIndices(att1.AggregationBits, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices1))
for i, indice := range attestingIndices1 {
@@ -66,7 +66,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices2, err := attestation.AttestingIndices(att2, committee)
attestingIndices2, err := attestation.AttestingIndices(att2.AggregationBits, committee)
require.NoError(t, err)
sigs = make([]bls.Signature, len(attestingIndices2))
for i, indice := range attestingIndices2 {
@@ -221,83 +221,6 @@ func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
require.ErrorContains(t, "committee index 100 >= committee count 1", err)
}
func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
var zeroSig [fieldparams.BLSSignatureLength]byte
beaconState, _ := util.DeterministicGenesisState(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
t.Run("ok", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(1, true)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(0, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
}
att.Signature = zeroSig[:]
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.NoError(t, err)
})
t.Run("non-zero committee index", func(t *testing.T) {
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
CommitteeIndex: 1,
},
AggregationBits: bitfield.NewBitlist(1),
CommitteeBits: bitfield.NewBitvector64(),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
})
t.Run("index of committee too big", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(63, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
}
att.Signature = zeroSig[:]
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "committee index 63 >= committee count 1", err)
})
t.Run("wrong aggregation bits count", func(t *testing.T) {
aggBits := bitfield.NewBitlist(123)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(0, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
}
att.Signature = zeroSig[:]
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "aggregation bits count 123 is different than participant count 3", err)
})
}
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
@@ -463,7 +386,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
list := bitfield.Bitlist{0b11111}
var atts []ethpb.Att
var atts []*ethpb.Attestation
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{
@@ -479,7 +402,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
assert.ErrorContains(t, want, err)
atts = []ethpb.Att{}
atts = []*ethpb.Attestation{}
list = bitfield.Bitlist{0b10000}
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
@@ -578,109 +501,53 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
}
}
t.Run("pre-Electra", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(5))
require.NoError(t, st.SetValidators(validators))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(5))
require.NoError(t, st.SetValidators(validators))
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
})
root, err = signing.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
},
})
t.Run("post-Electra", func(t *testing.T) {
st, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, st.SetSlot(5))
require.NoError(t, st.SetValidators(validators))
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
commBits1 := primitives.NewAttestationCommitteeBits()
commBits1.SetBitAt(0, true)
att1 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
CommitteeBits: commBits1,
Data: &ethpb.AttestationData{
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
commBits2 := primitives.NewAttestationCommitteeBits()
commBits2.SetBitAt(1, true)
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
CommitteeBits: commBits2,
Data: &ethpb.AttestationData{
Slot: 1,
},
})
root, err = signing.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
})
root, err = signing.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
}
func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
@@ -740,6 +607,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
_, err = blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
_, err = blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
require.NoError(t, err)
}

View File

@@ -7,11 +7,13 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/slashings"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -38,7 +40,7 @@ import (
func ProcessAttesterSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []ethpb.AttSlashing,
slashings []*ethpb.AttesterSlashing,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
@@ -55,7 +57,7 @@ func ProcessAttesterSlashings(
func ProcessAttesterSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing ethpb.AttSlashing,
slashing *ethpb.AttesterSlashing,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
@@ -75,7 +77,19 @@ func ProcessAttesterSlashing(
return nil, err
}
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
cfg := params.BeaconConfig()
var slashingQuotient uint64
switch {
case beaconState.Version() == version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
case beaconState.Version() >= version.Bellatrix:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
default:
return nil, errors.New("unknown state version")
}
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
if err != nil {
return nil, errors.Wrapf(err, "could not slash validator index %d",
validatorIndex)
@@ -90,20 +104,20 @@ func ProcessAttesterSlashing(
}
// VerifyAttesterSlashing validates the attestation data in both attestations in the slashing object.
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing ethpb.AttSlashing) error {
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing *ethpb.AttesterSlashing) error {
if slashing == nil {
return errors.New("nil slashing")
}
if slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
if slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
return errors.New("nil attestation")
}
if slashing.FirstAttestation().GetData() == nil || slashing.SecondAttestation().GetData() == nil {
if slashing.Attestation_1.Data == nil || slashing.Attestation_2.Data == nil {
return errors.New("nil attestation data")
}
att1 := slashing.FirstAttestation()
att2 := slashing.SecondAttestation()
data1 := att1.GetData()
data2 := att2.GetData()
att1 := slashing.Attestation_1
att2 := slashing.Attestation_2
data1 := att1.Data
data2 := att2.Data
if !IsSlashableAttestationData(data1, data2) {
return errors.New("attestations are not slashable")
}
@@ -143,11 +157,11 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
}
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
func SlashableAttesterIndices(slashing ethpb.AttSlashing) []uint64 {
if slashing == nil || slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
return nil
}
indices1 := slashing.FirstAttestation().GetAttestingIndices()
indices2 := slashing.SecondAttestation().GetAttestingIndices()
indices1 := slashing.Attestation_1.AttestingIndices
indices2 := slashing.Attestation_2.AttestingIndices
return slice.IntersectionUint64(indices1, indices2)
}

View File

@@ -57,11 +57,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
AttesterSlashings: slashings,
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
assert.ErrorContains(t, "attestations are not slashable", err)
}
@@ -96,11 +92,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}
@@ -152,11 +144,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
@@ -225,11 +213,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
@@ -298,11 +282,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
@@ -371,11 +351,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()

View File

@@ -216,7 +216,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
r, err := ProcessAttesterSlashings(ctx, s, []*ethpb.AttesterSlashing{a}, v.SlashValidator)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
}
@@ -297,6 +297,21 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
}
}
func TestFuzzVerifyAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
attestation := &ethpb.Attestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attestation)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
err = VerifyAttestationSignature(ctx, s, attestation)
_ = err
}
}
func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
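
The fuzz tests in this file all follow the same shape: seed a deterministic fuzzer, mutate the inputs in a loop, and assert one invariant (a non-nil result must never accompany an error). A sketch of that pattern with github.com/google/gofuzz and placeholder types, not the actual Prysm state or processing functions:

package main

import (
	"errors"
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type input struct {
	Slot    uint64
	Indices []uint64
}

// process is a placeholder for the function under test: by contract it must
// return a nil result whenever it returns an error.
func process(in *input) (*input, error) {
	if len(in.Indices) == 0 {
		return nil, errors.New("no indices")
	}
	return in, nil
}

func main() {
	f := fuzz.NewWithSeed(0) // deterministic, mirroring fuzz.NewWithSeed(0) above
	in := &input{}
	for i := 0; i < 1000; i++ {
		f.Fuzz(in)
		out, err := process(in)
		if err != nil && out != nil {
			panic(fmt.Sprintf("non-nil result %v returned with error %v", out, err))
		}
	}
}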

View File

@@ -91,11 +91,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
if !newRegistry[expectedSlashedVal].Slashed {

View File

@@ -135,6 +135,8 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
}
func TestVerifyExitAndSignature(t *testing.T) {
undo := util.HackDenebMaxuint(t)
defer undo()
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
tests := []struct {

View File

@@ -163,42 +163,7 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateElectra:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -609,7 +610,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -873,7 +874,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
})
}, big.NewInt(0))
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -12,14 +12,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"google.golang.org/protobuf/proto"
)
type slashValidatorFunc func(
ctx context.Context,
st state.BeaconState,
vid primitives.ValidatorIndex) (state.BeaconState, error)
type slashValidatorFunc func(ctx context.Context, st state.BeaconState, vid primitives.ValidatorIndex, penaltyQuotient, proposerRewardQuotient uint64) (state.BeaconState, error)
// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
@@ -77,7 +75,19 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
cfg := params.BeaconConfig()
var slashingQuotient uint64
switch {
case beaconState.Version() == version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
case beaconState.Version() >= version.Bellatrix:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
default:
return nil, errors.New("unknown state version")
}
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}
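
The rewritten ProcessProposerSlashing threads the penalty parameters through the slash callback and picks the slashing quotient by state version. A standalone sketch of that selection; the version ordering and quotient values below mirror the usual mainnet defaults and are assumptions for illustration, not values read from params.BeaconConfig():

package main

import (
	"errors"
	"fmt"
)

// Illustrative fork ordering, mirroring version.Phase0 < Altair < Bellatrix.
const (
	phase0 = iota
	altair
	bellatrix
)

// Assumed mainnet-style quotients; the real values come from the chain config.
const (
	minSlashingPenaltyQuotient          uint64 = 128 // Phase 0
	minSlashingPenaltyQuotientAltair    uint64 = 64
	minSlashingPenaltyQuotientBellatrix uint64 = 32
)

// slashingQuotientForVersion mirrors the switch added in ProcessProposerSlashing.
func slashingQuotientForVersion(v int) (uint64, error) {
	switch {
	case v == phase0:
		return minSlashingPenaltyQuotient, nil
	case v == altair:
		return minSlashingPenaltyQuotientAltair, nil
	case v >= bellatrix:
		return minSlashingPenaltyQuotientBellatrix, nil
	default:
		return 0, errors.New("unknown state version")
	}
}

func main() {
	for _, v := range []int{phase0, altair, bellatrix} {
		q, err := slashingQuotientForVersion(v)
		fmt.Println(v, q, err)
	}
}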

View File

@@ -179,7 +179,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
func createAttestationSignatureBatch(
ctx context.Context,
beaconState state.ReadOnlyBeaconState,
atts []ethpb.Att,
atts []*ethpb.Attestation,
domain []byte,
) (*bls.SignatureBatch, error) {
if len(atts) == 0 {
@@ -191,19 +191,19 @@ func createAttestationSignatureBatch(
msgs := make([][32]byte, len(atts))
descs := make([]string, len(atts))
for i, a := range atts {
sigs[i] = a.GetSignature()
committees, err := helpers.AttestationCommittees(ctx, beaconState, a)
sigs[i] = a.Signature
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
}
ia, err := attestation.ConvertToIndexed(ctx, a, committees...)
ia, err := attestation.ConvertToIndexed(ctx, a, c)
if err != nil {
return nil, err
}
if err := attestation.IsValidAttestationIndices(ctx, ia); err != nil {
return nil, err
}
indices := ia.GetAttestingIndices()
indices := ia.AttestingIndices
pubkeys := make([][]byte, len(indices))
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
@@ -215,7 +215,7 @@ func createAttestationSignatureBatch(
}
pks[i] = aggP
root, err := signing.ComputeSigningRoot(ia.GetData(), domain)
root, err := signing.ComputeSigningRoot(ia.Data, domain)
if err != nil {
return nil, errors.Wrap(err, "could not get signing root of object")
}
@@ -233,7 +233,7 @@ func createAttestationSignatureBatch(
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collate it into a signature batch object.
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []ethpb.Att) (*bls.SignatureBatch, error) {
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) {
if len(atts) == 0 {
return bls.NewSet(), nil
}
@@ -243,10 +243,10 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
dt := params.BeaconConfig().DomainBeaconAttester
// Split attestations by fork. Note: the signature domain will differ based on the fork.
var preForkAtts []ethpb.Att
var postForkAtts []ethpb.Att
var preForkAtts []*ethpb.Attestation
var postForkAtts []*ethpb.Attestation
for _, a := range atts {
if slots.ToEpoch(a.GetData().Slot) < fork.Epoch {
if slots.ToEpoch(a.Data.Slot) < fork.Epoch {
preForkAtts = append(preForkAtts, a)
} else {
postForkAtts = append(postForkAtts, a)
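
AttestationSignatureBatch splits attestations into pre-fork and post-fork groups because the attester signing domain changes at the fork epoch. A minimal sketch of that partitioning with plain types; the 32-slot epoch length is assumed here for illustration:

package main

import "fmt"

const slotsPerEpoch = 32 // illustrative; mainnet uses 32

type att struct {
	Slot uint64
}

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

// splitByForkEpoch mirrors the pre/post fork split performed before building
// two signature batches with different signing domains.
func splitByForkEpoch(atts []att, forkEpoch uint64) (pre, post []att) {
	for _, a := range atts {
		if toEpoch(a.Slot) < forkEpoch {
			pre = append(pre, a)
		} else {
			post = append(post, a)
		}
	}
	return pre, post
}

func main() {
	atts := []att{{Slot: 31}, {Slot: 32}, {Slot: 100}}
	pre, post := splitByForkEpoch(atts, 1)
	fmt.Println(len(pre), len(post)) // 1 2
}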

View File

@@ -145,7 +145,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, _, err := st.ExpectedWithdrawals()
expectedWithdrawals, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -249,12 +250,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
type args struct {
Name string
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []primitives.ValidatorIndex
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
Name string
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []primitives.ValidatorIndex
PartialWithdrawalIndices []primitives.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
}
type control struct {
NextWithdrawalValidatorIndex primitives.ValidatorIndex
@@ -282,7 +283,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
Amount: withdrawalAmount(i),
}
}
PendingPartialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
partialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
Index: idx,
ValidatorIndex: i,
@@ -320,12 +321,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 120,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 120,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 21),
partialWithdrawal(7, 21),
},
},
Control: control{
@@ -385,12 +386,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 22), PendingPartialWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
},
},
Control: control{
@@ -405,14 +406,14 @@ func TestProcessBlindWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
partialWithdrawal(89, 22), partialWithdrawal(1, 23), partialWithdrawal(2, 24),
fullWithdrawal(7, 25), partialWithdrawal(15, 26), fullWithdrawal(19, 27),
fullWithdrawal(28, 28),
},
},
@@ -452,17 +453,17 @@ func TestProcessBlindWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(1, 22), PendingPartialWithdrawal(2, 23), PendingPartialWithdrawal(3, 24),
PendingPartialWithdrawal(4, 25), PendingPartialWithdrawal(5, 26), PendingPartialWithdrawal(6, 27),
PendingPartialWithdrawal(7, 28), PendingPartialWithdrawal(8, 29), PendingPartialWithdrawal(9, 30),
PendingPartialWithdrawal(21, 31), PendingPartialWithdrawal(22, 32), PendingPartialWithdrawal(23, 33),
PendingPartialWithdrawal(24, 34), PendingPartialWithdrawal(25, 35), PendingPartialWithdrawal(26, 36),
PendingPartialWithdrawal(27, 37),
partialWithdrawal(1, 22), partialWithdrawal(2, 23), partialWithdrawal(3, 24),
partialWithdrawal(4, 25), partialWithdrawal(5, 26), partialWithdrawal(6, 27),
partialWithdrawal(7, 28), partialWithdrawal(8, 29), partialWithdrawal(9, 30),
partialWithdrawal(21, 31), partialWithdrawal(22, 32), partialWithdrawal(23, 33),
partialWithdrawal(24, 34), partialWithdrawal(25, 35), partialWithdrawal(26, 36),
partialWithdrawal(27, 37),
},
},
Control: control{
@@ -490,12 +491,12 @@ func TestProcessBlindWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "failure wrong number of partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Name: "failure wrong number of partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 21), PendingPartialWithdrawal(9, 22),
partialWithdrawal(7, 21), partialWithdrawal(9, 22),
},
},
Control: control{
@@ -539,7 +540,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
fullWithdrawal(7, 22), fullWithdrawal(19, 23), partialWithdrawal(28, 24),
fullWithdrawal(1, 25),
},
},
@@ -563,10 +564,10 @@ func TestProcessBlindWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "failure validator not partially withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
Name: "failure validator not partially withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(notPartiallyWithdrawable, 22),
},
@@ -610,7 +611,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
st.Balances[idx] = withdrawalAmount(idx)
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
}
for _, idx := range arguments.PendingPartialWithdrawalIndices {
for _, idx := range arguments.PartialWithdrawalIndices {
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
st.Balances[idx] = withdrawalAmount(idx)
}
@@ -628,8 +629,8 @@ func TestProcessBlindWithdrawals(t *testing.T) {
if test.Args.FullWithdrawalIndices == nil {
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
if test.Args.PendingPartialWithdrawalIndices == nil {
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
if test.Args.PartialWithdrawalIndices == nil {
test.Args.PartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
slot, err := slots.EpochStart(currentEpoch)
require.NoError(t, err)
@@ -642,7 +643,10 @@ func TestProcessBlindWithdrawals(t *testing.T) {
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]})
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
big.NewInt(0),
)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
@@ -669,12 +673,12 @@ func TestProcessWithdrawals(t *testing.T) {
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
type args struct {
Name string
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []primitives.ValidatorIndex
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
Name string
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []primitives.ValidatorIndex
PartialWithdrawalIndices []primitives.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
}
type control struct {
NextWithdrawalValidatorIndex primitives.ValidatorIndex
@@ -702,7 +706,7 @@ func TestProcessWithdrawals(t *testing.T) {
Amount: withdrawalAmount(i),
}
}
PendingPartialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
partialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
Index: idx,
ValidatorIndex: i,
@@ -740,12 +744,12 @@ func TestProcessWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 120,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 120,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 21),
partialWithdrawal(7, 21),
},
},
Control: control{
@@ -805,12 +809,12 @@ func TestProcessWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 22), PendingPartialWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
},
},
Control: control{
@@ -825,14 +829,14 @@ func TestProcessWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
partialWithdrawal(89, 22), partialWithdrawal(1, 23), partialWithdrawal(2, 24),
fullWithdrawal(7, 25), partialWithdrawal(15, 26), fullWithdrawal(19, 27),
fullWithdrawal(28, 28),
},
},
@@ -872,17 +876,17 @@ func TestProcessWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(1, 22), PendingPartialWithdrawal(2, 23), PendingPartialWithdrawal(3, 24),
PendingPartialWithdrawal(4, 25), PendingPartialWithdrawal(5, 26), PendingPartialWithdrawal(6, 27),
PendingPartialWithdrawal(7, 28), PendingPartialWithdrawal(8, 29), PendingPartialWithdrawal(9, 30),
PendingPartialWithdrawal(21, 31), PendingPartialWithdrawal(22, 32), PendingPartialWithdrawal(23, 33),
PendingPartialWithdrawal(24, 34), PendingPartialWithdrawal(25, 35), PendingPartialWithdrawal(26, 36),
PendingPartialWithdrawal(27, 37),
partialWithdrawal(1, 22), partialWithdrawal(2, 23), partialWithdrawal(3, 24),
partialWithdrawal(4, 25), partialWithdrawal(5, 26), partialWithdrawal(6, 27),
partialWithdrawal(7, 28), partialWithdrawal(8, 29), partialWithdrawal(9, 30),
partialWithdrawal(21, 31), partialWithdrawal(22, 32), partialWithdrawal(23, 33),
partialWithdrawal(24, 34), partialWithdrawal(25, 35), partialWithdrawal(26, 36),
partialWithdrawal(27, 37),
},
},
Control: control{
@@ -910,12 +914,12 @@ func TestProcessWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "failure wrong number of partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Name: "failure wrong number of partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 21), PendingPartialWithdrawal(9, 22),
partialWithdrawal(7, 21), partialWithdrawal(9, 22),
},
},
Control: control{
@@ -959,7 +963,7 @@ func TestProcessWithdrawals(t *testing.T) {
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
fullWithdrawal(7, 22), fullWithdrawal(19, 23), partialWithdrawal(28, 24),
fullWithdrawal(1, 25),
},
},
@@ -983,10 +987,10 @@ func TestProcessWithdrawals(t *testing.T) {
},
{
Args: args{
Name: "failure validator not partially withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
Name: "failure validator not partially withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(notPartiallyWithdrawable, 22),
},
@@ -1030,7 +1034,7 @@ func TestProcessWithdrawals(t *testing.T) {
st.Balances[idx] = withdrawalAmount(idx)
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
}
for _, idx := range arguments.PendingPartialWithdrawalIndices {
for _, idx := range arguments.PartialWithdrawalIndices {
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
st.Balances[idx] = withdrawalAmount(idx)
}
@@ -1048,8 +1052,8 @@ func TestProcessWithdrawals(t *testing.T) {
if test.Args.FullWithdrawalIndices == nil {
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
if test.Args.PendingPartialWithdrawalIndices == nil {
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
if test.Args.PartialWithdrawalIndices == nil {
test.Args.PartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
slot, err := slots.EpochStart(currentEpoch)
require.NoError(t, err)
@@ -1060,7 +1064,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals})
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
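
The table above exercises two distinct withdrawal kinds: a full withdrawal drains a withdrawable validator's entire balance, while a partial withdrawal only sweeps the excess above the maximum effective balance. A standalone sketch of the two predicates as defined in the Capella spec, with illustrative constants in place of Prysm's config:

package main

import "fmt"

const (
	maxEffectiveBalanceGwei uint64 = 32_000_000_000 // 32 ETH, illustrative
	farFutureEpoch                 = ^uint64(0)
)

type validator struct {
	WithdrawableEpoch uint64
	EffectiveBalance  uint64
	HasExecutionCreds bool
}

// fullyWithdrawable: execution credentials, withdrawable epoch reached, non-zero balance.
func fullyWithdrawable(v validator, balance, epoch uint64) bool {
	return v.HasExecutionCreds && v.WithdrawableEpoch <= epoch && balance > 0
}

// partiallyWithdrawable: at max effective balance with excess balance on top.
func partiallyWithdrawable(v validator, balance uint64) bool {
	return v.HasExecutionCreds &&
		v.EffectiveBalance == maxEffectiveBalanceGwei &&
		balance > maxEffectiveBalanceGwei
}

func main() {
	v := validator{WithdrawableEpoch: farFutureEpoch, EffectiveBalance: maxEffectiveBalanceGwei, HasExecutionCreds: true}
	fmt.Println(fullyWithdrawable(v, 33_000_000_000, 10)) // false: not yet withdrawable
	fmt.Println(partiallyWithdrawable(v, 33_000_000_000)) // true: the 1 ETH excess is swept
}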

View File

@@ -1,79 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"attestation.go",
"churn.go",
"consolidations.go",
"deposits.go",
"effective_balance_updates.go",
"registry_updates.go",
"transition.go",
"upgrade.go",
"validator.go",
"withdrawals.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"churn_test.go",
"consolidations_test.go",
"deposits_test.go",
"effective_balance_updates_test.go",
"upgrade_test.go",
"validator_test.go",
"withdrawals_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/blst:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/interop:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -1,7 +0,0 @@
package electra
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
var (
ProcessAttestationsNoVerifySignature = altair.ProcessAttestationsNoVerifySignature
)

View File

@@ -1,85 +0,0 @@
package electra
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// ComputeConsolidationEpochAndUpdateChurn fulfills the consensus spec definition below. This method
// calls mutating methods to the beacon state.
//
// Spec definition:
//
// def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidation_balance: Gwei) -> Epoch:
// earliest_consolidation_epoch = max(
// state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)))
// per_epoch_consolidation_churn = get_consolidation_churn_limit(state)
// # New epoch for consolidations.
// if state.earliest_consolidation_epoch < earliest_consolidation_epoch:
// consolidation_balance_to_consume = per_epoch_consolidation_churn
// else:
// consolidation_balance_to_consume = state.consolidation_balance_to_consume
//
// # Consolidation doesn't fit in the current earliest epoch.
// if consolidation_balance > consolidation_balance_to_consume:
// balance_to_process = consolidation_balance - consolidation_balance_to_consume
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
// earliest_consolidation_epoch += additional_epochs
// consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn
//
// # Consume the balance and update state variables.
// state.consolidation_balance_to_consume = consolidation_balance_to_consume - consolidation_balance
// state.earliest_consolidation_epoch = earliest_consolidation_epoch
//
// return state.earliest_consolidation_epoch
func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
earliestEpoch, err := s.EarliestConsolidationEpoch()
if err != nil {
return 0, err
}
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(s.Slot())))
activeBal, err := helpers.TotalActiveBalance(s)
if err != nil {
return 0, err
}
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
// New epoch for consolidations.
var consolidationBalanceToConsume primitives.Gwei
if earliestEpoch < earliestConsolidationEpoch {
consolidationBalanceToConsume = perEpochConsolidationChurn
} else {
consolidationBalanceToConsume, err = s.ConsolidationBalanceToConsume()
if err != nil {
return 0, err
}
}
// Consolidation doesn't fit in the current earliest epoch.
if consolidationBalance > consolidationBalanceToConsume {
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
if err != nil {
return 0, err
}
additionalEpochs++
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
}
// Consume the balance and update state variables.
if err := s.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
return 0, err
}
if err := s.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
return 0, err
}
return earliestConsolidationEpoch, nil
}
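
The function above is integer bookkeeping: it tracks a remaining churn budget for the earliest consolidation epoch and spills into later epochs when a consolidation does not fit. A self-contained sketch of the same arithmetic with plain uint64 Gwei values; the names and parameters are placeholders standing in for the state fields:

package main

import "fmt"

// computeConsolidationEpoch mirrors the spec arithmetic quoted above.
// earliestEpoch and balanceToConsume play the role of the two state fields;
// perEpochChurn is get_consolidation_churn_limit(state).
func computeConsolidationEpoch(
	earliestEpoch, activationExitEpoch uint64,
	balanceToConsume, perEpochChurn, consolidationBalance uint64,
) (newEarliest, newBalanceToConsume uint64) {
	newEarliest = earliestEpoch
	if activationExitEpoch > newEarliest {
		newEarliest = activationExitEpoch
	}
	if earliestEpoch < newEarliest {
		balanceToConsume = perEpochChurn // a fresh epoch starts with a full churn budget
	}
	if consolidationBalance > balanceToConsume {
		toProcess := consolidationBalance - balanceToConsume
		additionalEpochs := (toProcess-1)/perEpochChurn + 1
		newEarliest += additionalEpochs
		balanceToConsume += additionalEpochs * perEpochChurn
	}
	return newEarliest, balanceToConsume - consolidationBalance
}

func main() {
	// 232 ETH per-epoch churn, consolidating 235 ETH starting from a fresh epoch:
	// one extra epoch is needed and 229 ETH of budget remains afterwards.
	epoch, left := computeConsolidationEpoch(9, 15, 0, 232_000_000_000, 235_000_000_000)
	fmt.Println(epoch, left) // 16 229000000000
}

The worked values in main match the "flows into another epoch" case in the test file that follows.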

View File

@@ -1,141 +0,0 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
vals := make([]*eth.Validator, num)
for i := range vals {
vals[i] = &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
}
}
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
vals = append(vals, &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
})
}
return vals
}
func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
// Test setup: create a state with 32M ETH total active balance.
// In this state, the churn is expected to be 232 ETH per epoch.
tests := []struct {
name string
state state.BeaconState
consolidationBalance primitives.Gwei
expectedEpoch primitives.Epoch
expectedConsolidationBalanceToConsume primitives.Gwei
}{
{
name: "compute consolidation with no consolidation balance",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 9,
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 0, // 0 ETH
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
expectedConsolidationBalanceToConsume: 232000000000, // 232 ETH
},
{
name: "new epoch for consolidations",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 9,
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 32000000000, // 32 ETH
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
},
{
name: "flows into another epoch",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 9,
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 235000000000, // 235 ETH
expectedEpoch: 16, // Flows into another epoch.
expectedConsolidationBalanceToConsume: 229000000000, // 229 ETH
},
{
name: "not a new epoch, fits in remaining balance of current epoch",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 15,
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 32000000000, // 32 ETH
expectedEpoch: 15, // Fits into current earliest consolidation epoch.
expectedConsolidationBalanceToConsume: 168000000000, // 168 ETH
},
{
name: "not a new epoch, does not fit in remaining balance of current epoch",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 15,
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 232000000000, // 232 ETH
expectedEpoch: 16, // Flows into another epoch.
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotEpoch, err := electra.ComputeConsolidationEpochAndUpdateChurn(context.TODO(), tt.state, tt.consolidationBalance)
require.NoError(t, err)
require.Equal(t, tt.expectedEpoch, gotEpoch)
// Check consolidation balance to consume is set on the state.
cbtc, err := tt.state.ConsolidationBalanceToConsume()
require.NoError(t, err)
require.Equal(t, tt.expectedConsolidationBalanceToConsume, cbtc)
// Check earliest consolidation epoch was set on the state.
gotEpoch, err = tt.state.EarliestConsolidationEpoch()
require.NoError(t, err)
require.Equal(t, tt.expectedEpoch, gotEpoch)
})
}
}
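
The 232 ETH per-epoch churn used throughout these cases follows from the Electra churn formulas: with 32M ETH of active stake, the balance churn limit is 32,000,000 / 65,536 ≈ 488 ETH floored to a whole effective-balance increment, the activation/exit churn is capped at 256 ETH, and the consolidation churn is the difference, 232 ETH. A small sketch of that arithmetic; the constants are the usual mainnet values and are stated here as assumptions rather than read from the config:

package main

import "fmt"

// Assumed mainnet constants, in Gwei; the real values live in params.BeaconConfig().
const (
	effectiveBalanceIncrement      uint64 = 1_000_000_000   // 1 ETH
	minPerEpochChurnLimitElectra   uint64 = 128_000_000_000 // 128 ETH
	maxPerEpochActivationExitChurn uint64 = 256_000_000_000 // 256 ETH
	churnLimitQuotient             uint64 = 65_536
)

func balanceChurnLimit(totalActiveBalance uint64) uint64 {
	churn := totalActiveBalance / churnLimitQuotient
	if churn < minPerEpochChurnLimitElectra {
		churn = minPerEpochChurnLimitElectra
	}
	return churn - churn%effectiveBalanceIncrement // floor to a whole increment
}

func activationExitChurnLimit(totalActiveBalance uint64) uint64 {
	c := balanceChurnLimit(totalActiveBalance)
	if c > maxPerEpochActivationExitChurn {
		return maxPerEpochActivationExitChurn
	}
	return c
}

func consolidationChurnLimit(totalActiveBalance uint64) uint64 {
	return balanceChurnLimit(totalActiveBalance) - activationExitChurnLimit(totalActiveBalance)
}

func main() {
	total := uint64(32_000_000_000_000_000) // 32M ETH in Gwei
	fmt.Println(consolidationChurnLimit(total)) // 232000000000, i.e. 232 ETH
}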

View File

@@ -1,258 +0,0 @@
package electra
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// Spec definition:
//
// def process_pending_consolidations(state: BeaconState) -> None:
// next_pending_consolidation = 0
// for pending_consolidation in state.pending_consolidations:
// source_validator = state.validators[pending_consolidation.source_index]
// if source_validator.slashed:
// next_pending_consolidation += 1
// continue
// if source_validator.withdrawable_epoch > get_current_epoch(state):
// break
//
// # Churn any target excess active balance of target and raise its max
// switch_to_compounding_validator(state, pending_consolidation.target_index)
// # Move active balance to target. Excess balance is withdrawable.
// active_balance = get_active_balance(state, pending_consolidation.source_index)
// decrease_balance(state, pending_consolidation.source_index, active_balance)
// increase_balance(state, pending_consolidation.target_index, active_balance)
// next_pending_consolidation += 1
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
defer span.End()
if st == nil || st.IsNil() {
return errors.New("nil state")
}
currentEpoch := slots.ToEpoch(st.Slot())
var nextPendingConsolidation uint64
pendingConsolidations, err := st.PendingConsolidations()
if err != nil {
return err
}
for _, pc := range pendingConsolidations {
sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
if err != nil {
return err
}
if sourceValidator.Slashed {
nextPendingConsolidation++
continue
}
if sourceValidator.WithdrawableEpoch > currentEpoch {
break
}
if err := SwitchToCompoundingValidator(ctx, st, pc.TargetIndex); err != nil {
return err
}
activeBalance, err := st.ActiveBalanceAtIndex(pc.SourceIndex)
if err != nil {
return err
}
if err := helpers.DecreaseBalance(st, pc.SourceIndex, activeBalance); err != nil {
return err
}
if err := helpers.IncreaseBalance(st, pc.TargetIndex, activeBalance); err != nil {
return err
}
nextPendingConsolidation++
}
if nextPendingConsolidation > 0 {
return st.SetPendingConsolidations(pendingConsolidations[nextPendingConsolidation:])
}
return nil
}
// ProcessConsolidations implements the spec definition below. This method makes mutating calls to
// the beacon state.
//
// Spec definition:
//
// def process_consolidation(state: BeaconState, signed_consolidation: SignedConsolidation) -> None:
// # If the pending consolidations queue is full, no consolidations are allowed in the block
// assert len(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT
// # If there is too little available consolidation churn limit, no consolidations are allowed in the block
// assert get_consolidation_churn_limit(state) > MIN_ACTIVATION_BALANCE
// consolidation = signed_consolidation.message
// # Verify that source != target, so a consolidation cannot be used as an exit.
// assert consolidation.source_index != consolidation.target_index
//
// source_validator = state.validators[consolidation.source_index]
// target_validator = state.validators[consolidation.target_index]
// # Verify the source and the target are active
// current_epoch = get_current_epoch(state)
// assert is_active_validator(source_validator, current_epoch)
// assert is_active_validator(target_validator, current_epoch)
// # Verify exits for source and target have not been initiated
// assert source_validator.exit_epoch == FAR_FUTURE_EPOCH
// assert target_validator.exit_epoch == FAR_FUTURE_EPOCH
// # Consolidations must specify an epoch when they become valid; they are not valid before then
// assert current_epoch >= consolidation.epoch
//
// # Verify the source and the target have Execution layer withdrawal credentials
// assert has_execution_withdrawal_credential(source_validator)
// assert has_execution_withdrawal_credential(target_validator)
// # Verify the same withdrawal address
// assert source_validator.withdrawal_credentials[12:] == target_validator.withdrawal_credentials[12:]
//
// # Verify consolidation is signed by the source and the target
// domain = compute_domain(DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
// signing_root = compute_signing_root(consolidation, domain)
// pubkeys = [source_validator.pubkey, target_validator.pubkey]
// assert bls.FastAggregateVerify(pubkeys, signing_root, signed_consolidation.signature)
//
// # Initiate source validator exit and append pending consolidation
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
// state, source_validator.effective_balance)
// source_validator.withdrawable_epoch = Epoch(
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// )
// state.pending_consolidations.append(PendingConsolidation(
// source_index=consolidation.source_index,
// target_index=consolidation.target_index
// ))
func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethpb.SignedConsolidation) error {
_, span := trace.StartSpan(ctx, "electra.ProcessConsolidations")
defer span.End()
if st == nil || st.IsNil() {
return errors.New("nil state")
}
if len(cs) == 0 {
return nil // Nothing to process.
}
domain, err := signing.ComputeDomain(
params.BeaconConfig().DomainConsolidation,
nil, // Use genesis fork version
st.GenesisValidatorsRoot(),
)
if err != nil {
return err
}
totalBalance, err := helpers.TotalActiveBalance(st)
if err != nil {
return err
}
if helpers.ConsolidationChurnLimit(primitives.Gwei(totalBalance)) <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
return errors.New("too little available consolidation churn limit")
}
currentEpoch := slots.ToEpoch(st.Slot())
for _, c := range cs {
if c == nil || c.Message == nil {
return errors.New("nil consolidation")
}
if n, err := st.NumPendingConsolidations(); err != nil {
return err
} else if n >= params.BeaconConfig().PendingConsolidationsLimit {
return errors.New("pending consolidations queue is full")
}
if c.Message.SourceIndex == c.Message.TargetIndex {
return errors.New("source and target index are the same")
}
source, err := st.ValidatorAtIndex(c.Message.SourceIndex)
if err != nil {
return err
}
target, err := st.ValidatorAtIndex(c.Message.TargetIndex)
if err != nil {
return err
}
if !helpers.IsActiveValidator(source, currentEpoch) {
return errors.New("source is not active")
}
if !helpers.IsActiveValidator(target, currentEpoch) {
return errors.New("target is not active")
}
if source.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return errors.New("source exit epoch has been initiated")
}
if target.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return errors.New("target exit epoch has been initiated")
}
if currentEpoch < c.Message.Epoch {
return errors.New("consolidation is not valid yet")
}
if !helpers.HasExecutionWithdrawalCredentials(source) {
return errors.New("source does not have execution withdrawal credentials")
}
if !helpers.HasExecutionWithdrawalCredentials(target) {
return errors.New("target does not have execution withdrawal credentials")
}
if !helpers.IsSameWithdrawalCredentials(source, target) {
return errors.New("source and target have different withdrawal credentials")
}
sr, err := signing.ComputeSigningRoot(c.Message, domain)
if err != nil {
return err
}
sourcePk, err := bls.PublicKeyFromBytes(source.PublicKey)
if err != nil {
return errors.Wrap(err, "could not convert source public key bytes to bls public key")
}
targetPk, err := bls.PublicKeyFromBytes(target.PublicKey)
if err != nil {
return errors.Wrap(err, "could not convert target public key bytes to bls public key")
}
sig, err := bls.SignatureFromBytes(c.Signature)
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
if !sig.FastAggregateVerify([]bls.PublicKey{sourcePk, targetPk}, sr) {
return errors.New("consolidation signature verification failed")
}
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(source.EffectiveBalance))
if err != nil {
return err
}
source.ExitEpoch = sEE
source.WithdrawableEpoch = sEE + params.BeaconConfig().MinValidatorWithdrawabilityDelay
if err := st.UpdateValidatorAtIndex(c.Message.SourceIndex, source); err != nil {
return err
}
if err := st.AppendPendingConsolidation(c.Message.ToPendingConsolidation()); err != nil {
return err
}
}
return nil
}
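
Most of ProcessConsolidations is a sequence of validity checks before any state mutation: distinct indices, both validators active and not already exiting, the consolidation epoch reached, execution withdrawal credentials on both sides pointing at the same address, and finally a two-key aggregate signature. A condensed sketch of the pre-signature checks with placeholder types, not Prysm's state interfaces:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

const farFutureEpoch = ^uint64(0)

type validator struct {
	ActivationEpoch, ExitEpoch uint64
	WithdrawalCredentials      []byte // a 0x01 or 0x02 prefix marks execution credentials
}

func isActive(v validator, epoch uint64) bool {
	return v.ActivationEpoch <= epoch && epoch < v.ExitEpoch
}

func hasExecutionCredentials(v validator) bool {
	return len(v.WithdrawalCredentials) == 32 &&
		(v.WithdrawalCredentials[0] == 0x01 || v.WithdrawalCredentials[0] == 0x02)
}

// validateConsolidation mirrors the checks performed before signature verification.
func validateConsolidation(source, target validator, sourceIdx, targetIdx, consolidationEpoch, currentEpoch uint64) error {
	switch {
	case sourceIdx == targetIdx:
		return errors.New("source and target index are the same")
	case !isActive(source, currentEpoch) || !isActive(target, currentEpoch):
		return errors.New("source or target is not active")
	case source.ExitEpoch != farFutureEpoch || target.ExitEpoch != farFutureEpoch:
		return errors.New("an exit has already been initiated")
	case currentEpoch < consolidationEpoch:
		return errors.New("consolidation is not valid yet")
	case !hasExecutionCredentials(source) || !hasExecutionCredentials(target):
		return errors.New("missing execution withdrawal credentials")
	case !bytes.Equal(source.WithdrawalCredentials[12:], target.WithdrawalCredentials[12:]):
		return errors.New("source and target have different withdrawal credentials")
	}
	return nil // signature verification and churn accounting would follow here
}

func main() {
	wc := make([]byte, 32)
	wc[0] = 0x01
	v := validator{ExitEpoch: farFutureEpoch, WithdrawalCredentials: wc}
	fmt.Println(validateConsolidation(v, v, 1, 2, 8, 10)) // <nil>
}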

View File

@@ -1,441 +0,0 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/interop"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestProcessPendingConsolidations(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
check func(*testing.T, state.BeaconState)
wantErr bool
}{
{
name: "nil state",
state: nil,
wantErr: true,
},
{
name: "no pending consolidations",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
wantErr: false,
},
{
name: "processes pending consolidation successfully",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
},
PendingConsolidations: []*eth.PendingConsolidation{
{
SourceIndex: 0,
TargetIndex: 1,
},
},
}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
// Balances are transferred from v0 to v1.
bal0, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), bal0)
bal1, err := st.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, 2*params.BeaconConfig().MinActivationBalance, bal1)
// The pending consolidation is removed from the list.
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(0), num)
// v1 is switched to compounding validator.
v1, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
},
wantErr: false,
},
{
name: "stop processing when a source val withdrawable epoch is in the future",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
WithdrawableEpoch: 100,
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
},
PendingConsolidations: []*eth.PendingConsolidation{
{
SourceIndex: 0,
TargetIndex: 1,
},
},
}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
// No balances are transferred from v0 to v1.
bal0, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
bal1, err := st.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
// The pending consolidation is still in the list.
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(1), num)
},
wantErr: false,
},
{
name: "slashed validator is not consolidated",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
},
{
Slashed: true,
},
{
WithdrawalCredentials: []byte{0x01, 0xCC},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
},
PendingConsolidations: []*eth.PendingConsolidation{
{
SourceIndex: 2,
TargetIndex: 3,
},
{
SourceIndex: 0,
TargetIndex: 1,
},
},
}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
// No balances are transferred from v2 to v3.
bal0, err := st.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
bal1, err := st.BalanceAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
// No pending consolidation remaining.
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(0), num)
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessPendingConsolidations(context.TODO(), tt.state)
require.Equal(t, tt.wantErr, err != nil)
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}
func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
gwei := balETH * 1_000_000_000
balPerVal := params.BeaconConfig().MinActivationBalance
numVals := gwei / balPerVal
vals := make([]*eth.Validator, numVals)
bals := make([]uint64, numVals)
for i := uint64(0); i < numVals; i++ {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
vals[i] = &eth.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: balPerVal,
WithdrawalCredentials: wc,
}
bals[i] = balPerVal
}
st, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
Validators: vals,
Balances: bals,
Fork: &eth.Fork{
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
},
})
require.NoError(t, err)
return st
}
func TestProcessConsolidations(t *testing.T) {
secretKeys, publicKeys, err := interop.DeterministicallyGenerateKeys(0, 2)
require.NoError(t, err)
genesisValidatorRoot := bytesutil.PadTo([]byte("genesisValidatorRoot"), fieldparams.RootLength)
_ = secretKeys
tests := []struct {
name string
state state.BeaconState
scs []*eth.SignedConsolidation
check func(*testing.T, state.BeaconState)
wantErr string
}{
{
name: "nil state",
scs: make([]*eth.SignedConsolidation, 10),
wantErr: "nil state",
},
{
name: "nil consolidation in slice",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{nil, nil},
wantErr: "nil consolidation",
},
{
name: "state is 100% full of pending consolidations",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
pc := make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit)
require.NoError(t, st.SetPendingConsolidations(pc))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{}}},
wantErr: "pending consolidations queue is full",
},
{
name: "state has too little consolidation churn limit available to process a consolidation",
state: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{}}},
wantErr: "too little available consolidation churn limit",
},
{
name: "consolidation with source and target as the same index is rejected",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 100}}},
wantErr: "source and target index are the same",
},
{
name: "consolidation with inactive source is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
wantErr: "source is not active",
},
{
name: "consolidation with inactive target is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "target is not active",
},
{
name: "consolidation with exiting source is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ExitEpoch = 256
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
wantErr: "source exit epoch has been initiated",
},
{
name: "consolidation with exiting target is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ExitEpoch = 256
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "target exit epoch has been initiated",
},
{
name: "consolidation with future epoch is rejected",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 55}}},
wantErr: "consolidation is not valid yet",
},
{
name: "source validator without withdrawal credentials is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.WithdrawalCredentials = []byte{}
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
wantErr: "source does not have execution withdrawal credentials",
},
{
name: "target validator without withdrawal credentials is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.WithdrawalCredentials = []byte{}
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "target does not have execution withdrawal credentials",
},
{
name: "source and target with different withdrawal credentials is rejected",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "source and target have different withdrawal credentials",
},
{
name: "consolidation with valid signatures is OK",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
require.NoError(t, st.SetGenesisValidatorsRoot(genesisValidatorRoot))
source, err := st.ValidatorAtIndex(100)
require.NoError(t, err)
target, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
source.PublicKey = publicKeys[0].Marshal()
source.WithdrawalCredentials = target.WithdrawalCredentials
require.NoError(t, st.UpdateValidatorAtIndex(100, source))
target.PublicKey = publicKeys[1].Marshal()
require.NoError(t, st.UpdateValidatorAtIndex(25, target))
return st
}(),
scs: func() []*eth.SignedConsolidation {
sc := &eth.SignedConsolidation{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 8}}
domain, err := signing.ComputeDomain(
params.BeaconConfig().DomainConsolidation,
nil,
genesisValidatorRoot,
)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(sc.Message, domain)
require.NoError(t, err)
sig0 := secretKeys[0].Sign(sr[:])
sig1 := secretKeys[1].Sign(sr[:])
sc.Signature = blst.AggregateSignatures([]common.Signature{sig0, sig1}).Marshal()
return []*eth.SignedConsolidation{sc}
}(),
check: func(t *testing.T, st state.BeaconState) {
source, err := st.ValidatorAtIndex(100)
require.NoError(t, err)
// The consolidated validator is exiting.
require.Equal(t, primitives.Epoch(15), source.ExitEpoch) // 15 = state.Epoch(10) + 1 + MAX_SEED_LOOKAHEAD(4)
require.Equal(t, primitives.Epoch(15+params.BeaconConfig().MinValidatorWithdrawabilityDelay), source.WithdrawableEpoch)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessConsolidations(context.TODO(), tt.state, tt.scs)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}
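The expected exit epoch in the final case follows the spec's activation/exit epoch arithmetic. Below is a minimal, self-contained sketch assuming mainnet preset values (MAX_SEED_LOOKAHEAD = 4, MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256); the helper is illustrative only and not a Prysm API.

package main

import "fmt"

const (
	maxSeedLookahead                 = 4   // assumed mainnet MAX_SEED_LOOKAHEAD
	minValidatorWithdrawabilityDelay = 256 // assumed mainnet MIN_VALIDATOR_WITHDRAWABILITY_DELAY
)

// computeActivationExitEpoch mirrors compute_activation_exit_epoch(epoch) from the spec:
// the earliest epoch at which an exit scheduled during `epoch` can take effect.
func computeActivationExitEpoch(epoch uint64) uint64 {
	return epoch + 1 + maxSeedLookahead
}

func main() {
	currentEpoch := uint64(10) // the test state above sits at epoch 10
	exitEpoch := computeActivationExitEpoch(currentEpoch)
	withdrawableEpoch := exitEpoch + minValidatorWithdrawabilityDelay
	fmt.Println(exitEpoch, withdrawableEpoch) // 15 271
}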

View File

@@ -1,87 +0,0 @@
package electra
import (
"context"
"errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"go.opencensus.io/trace"
)
// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
//
// Spec definition:
//
// def process_pending_balance_deposits(state: BeaconState) -> None:
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
// processed_amount = 0
// next_deposit_index = 0
//
// for deposit in state.pending_balance_deposits:
// if processed_amount + deposit.amount > available_for_processing:
// break
// increase_balance(state, deposit.index, deposit.amount)
// processed_amount += deposit.amount
// next_deposit_index += 1
//
// state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
//
// if len(state.pending_balance_deposits) == 0:
// state.deposit_balance_to_consume = Gwei(0)
// else:
// state.deposit_balance_to_consume = available_for_processing - processed_amount
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
defer span.End()
if st == nil || st.IsNil() {
return errors.New("nil state")
}
depBalToConsume, err := st.DepositBalanceToConsume()
if err != nil {
return err
}
availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)
nextDepositIndex := 0
deposits, err := st.PendingBalanceDeposits()
if err != nil {
return err
}
for _, deposit := range deposits {
if primitives.Gwei(deposit.Amount) > availableForProcessing {
break
}
if err := helpers.IncreaseBalance(st, deposit.Index, deposit.Amount); err != nil {
return err
}
availableForProcessing -= primitives.Gwei(deposit.Amount)
nextDepositIndex++
}
deposits = deposits[nextDepositIndex:]
if err := st.SetPendingBalanceDeposits(deposits); err != nil {
return err
}
if len(deposits) == 0 {
return st.SetDepositBalanceToConsume(0)
} else {
return st.SetDepositBalanceToConsume(availableForProcessing)
}
}
// ProcessDepositRequests processes execution layer deposit requests as part of Electra.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
defer span.End()
// TODO: replace with 6110 logic
// return b.ProcessDepositRequests(beaconState, requests)
return beaconState, nil
}
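The churn accounting above can be traced with concrete numbers. The following is a minimal, self-contained sketch assuming mainnet Electra presets (MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA = 128 ETH, MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT = 256 ETH, CHURN_LIMIT_QUOTIENT = 65536); it uses plain integers instead of Prysm's state and mirrors the partial-processing test case further down in this diff.

package main

import "fmt"

const (
	gweiPerETH                = 1_000_000_000
	minPerEpochChurnLimit     = 128 * gweiPerETH // assumed MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA
	maxActivationExitChurn    = 256 * gweiPerETH // assumed MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT
	churnLimitQuotient        = 65536            // assumed CHURN_LIMIT_QUOTIENT
	effectiveBalanceIncrement = 1 * gweiPerETH
)

// activationExitChurnLimit sketches get_activation_exit_churn_limit: the per-epoch
// balance churn, floored at the minimum, rounded down to an increment, and capped.
func activationExitChurnLimit(totalActiveBalance uint64) uint64 {
	churn := totalActiveBalance / churnLimitQuotient
	if churn < minPerEpochChurnLimit {
		churn = minPerEpochChurnLimit
	}
	churn -= churn % effectiveBalanceIncrement
	if churn > maxActivationExitChurn {
		churn = maxActivationExitChurn
	}
	return churn
}

func main() {
	totalActive := uint64(1_000) * gweiPerETH // 1,000 ETH of active balance
	churn := activationExitChurnLimit(totalActive)
	available := uint64(100) + churn // 100 Gwei of deposit balance carried over

	deposits := make([]uint64, 20)
	for i := range deposits {
		deposits[i] = churn / 10 // twenty deposits of one tenth of the churn each
	}

	processed := 0
	for _, amt := range deposits {
		if amt > available {
			break
		}
		available -= amt
		processed++
	}
	// Ten deposits fit into the churn; the 100 Gwei remainder carries forward.
	fmt.Println(processed, available) // 10 100
}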

View File

@@ -1,128 +0,0 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestProcessPendingBalanceDeposits(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
wantErr bool
check func(*testing.T, state.BeaconState)
}{
{
name: "nil state fails",
state: nil,
wantErr: true,
},
{
name: "no deposits resets balance to consume",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(100))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
},
},
{
name: "more deposits than balance to consume processes partial deposits",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(100))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 20)
for i := 0; i < len(deps); i++ {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 10,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(100), res)
// Validators 0..9 should have their balance increased
for i := primitives.ValidatorIndex(0); i < 10; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
}
// Half of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 10, len(remaining))
},
},
{
name: "less deposits than balance to consume processes all deposits",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(0))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 5)
for i := 0; i < len(deps); i++ {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 5,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
// Validators 0..4 should have their balance increased
for i := primitives.ValidatorIndex(0); i < 5; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
}
// All of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var tab uint64
var err error
if tt.state != nil {
// The caller of this method would normally have the precomputed balance values for total
// active balance for this epoch. For ease of test setup, we will compute total active
// balance from the given state.
tab, err = helpers.TotalActiveBalance(tt.state)
}
require.NoError(t, err)
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%v", tt.wantErr, err)
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}

View File

@@ -1,65 +0,0 @@
package electra
import (
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
//
// Spec pseudocode definition:
//
// def process_effective_balance_updates(state: BeaconState) -> None:
// # Update effective balances with hysteresis
// for index, validator in enumerate(state.validators):
// balance = state.balances[index]
// HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
// EFFECTIVE_BALANCE_LIMIT = (
// MAX_EFFECTIVE_BALANCE_EIP7251 if has_compounding_withdrawal_credential(validator)
// else MIN_ACTIVATION_BALANCE
// )
//
// if (
// balance + DOWNWARD_THRESHOLD < validator.effective_balance
// or validator.effective_balance + UPWARD_THRESHOLD < balance
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
bals := state.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
if val == nil {
return false, nil, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return false, nil, fmt.Errorf("validator index %d exceeds balances length %d in state", idx, len(bals))
}
balance := bals[idx]
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
if helpers.HasCompoundingWithdrawalCredential(val) {
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
}
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
val.EffectiveBalance = effectiveBal
return false, val, nil
}
return false, val, nil
}
return state.ApplyToEveryValidator(validatorFunc)
}
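With mainnet presets (assumed here: EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, HYSTERESIS_DOWNWARD_MULTIPLIER = 1, HYSTERESIS_UPWARD_MULTIPLIER = 5), the hysteresis band works out to 0.25 ETH below and 1.25 ETH above the current effective balance. A small self-contained sketch of that band check, independent of Prysm's config package:

package main

import "fmt"

func main() {
	const gwei = uint64(1_000_000_000)
	const (
		effectiveBalanceIncrement = 1 * 1_000_000_000 // assumed 1 ETH in Gwei
		hysteresisQuotient        = 4
		downwardMultiplier        = 1
		upwardMultiplier          = 5
	)
	hysteresisIncrement := uint64(effectiveBalanceIncrement / hysteresisQuotient) // 0.25 ETH
	downwardThreshold := hysteresisIncrement * downwardMultiplier                 // 0.25 ETH
	upwardThreshold := hysteresisIncrement * upwardMultiplier                     // 1.25 ETH

	effective := uint64(32) * gwei // a validator sitting at 32 ETH effective balance

	// The effective balance only moves when the actual balance leaves the band
	// (effective - 0.25 ETH, effective + 1.25 ETH).
	for _, balance := range []uint64{
		effective - downwardThreshold - 1, // just below the band: effective balance would drop
		effective - downwardThreshold + 1, // inside the band: no change
		effective + upwardThreshold + 1,   // just above the band: effective balance would rise
		effective + upwardThreshold - 1,   // inside the band: no change
	} {
		moves := balance+downwardThreshold < effective || effective+upwardThreshold < balance
		fmt.Printf("balance=%d Gwei moves=%v\n", balance, moves)
	}
}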

View File

@@ -1,144 +0,0 @@
package electra_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestProcessEffectiveBalanceUpdates(t *testing.T) {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
tests := []struct {
name string
state state.BeaconState
wantErr bool
check func(*testing.T, state.BeaconState)
}{
{
name: "validator with compounding withdrawal credentials updates effective balance",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
},
Balances: []uint64{
params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
},
}
st, err := state_native.InitializeFromProtoElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, bs state.BeaconState) {
val, err := bs.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalanceElectra, val.EffectiveBalance)
},
},
{
name: "validator without compounding withdrawal credentials updates effective balance",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
WithdrawalCredentials: nil,
},
},
Balances: []uint64{
params.BeaconConfig().MaxEffectiveBalanceElectra,
},
}
st, err := state_native.InitializeFromProtoElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, bs state.BeaconState) {
val, err := bs.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
},
},
{
name: "validator effective balance moves only when outside of threshold",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance - downwardThreshold - 1, // beyond downward threshold
params.BeaconConfig().MinActivationBalance - downwardThreshold + 1, // within downward threshold
params.BeaconConfig().MinActivationBalance + upwardThreshold + 1, // beyond upward threshold
params.BeaconConfig().MinActivationBalance + upwardThreshold - 1, // within upward threshold
},
}
st, err := state_native.InitializeFromProtoElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, bs state.BeaconState) {
// Validator 0 has a balance diff exceeding the threshold, so its effective balance
// moves by one effective balance increment.
val, err := bs.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance-params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
// validator 1 has a balance diff within the threshold so the effective balance should not
// have changed.
val, err = bs.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
// Validator 2 has a balance diff exceeding the threshold, so its effective balance
// moves by one effective balance increment.
val, err = bs.ValidatorAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
// Validator 3 has a balance diff within the threshold so the effective balance should not
// have changed.
val, err = bs.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessEffectiveBalanceUpdates(tt.state)
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%v", tt.wantErr, err)
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}

View File

@@ -1,34 +0,0 @@
package electra
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)
// ProcessRegistryUpdates rotates validators in and out of the active pool.
// The number of validators rotated per epoch is bounded by the churn limit.
//
// Spec pseudocode definition:
//
// def process_registry_updates(state: BeaconState) -> None:
// # Process activation eligibility and ejections
// for index, validator in enumerate(state.validators):
// if is_eligible_for_activation_queue(validator):
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
// if (
// is_active_validator(validator, get_current_epoch(state))
// and validator.effective_balance <= EJECTION_BALANCE
// ):
// initiate_validator_exit(state, ValidatorIndex(index))
//
// # Activate all eligible validators
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
// for validator in state.validators:
// if is_eligible_for_activation(state, validator):
// validator.activation_epoch = activation_epoch
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
// TODO: replace with real implementation
return state, nil
}
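Since the Go body above is still a stub, the sketch below translates the spec pseudocode onto a deliberately simplified, self-contained model: plain structs, assumed mainnet-style constants, and none of the churn-limited exit queue handling that the real initiate_validator_exit performs. Type and helper names here are illustrative, not Prysm APIs.

package main

import "fmt"

// Assumed mainnet-style constants; illustrative only, not Prysm's config.
const (
	farFutureEpoch      = ^uint64(0)
	maxSeedLookahead    = 4              // MAX_SEED_LOOKAHEAD
	ejectionBalanceGwei = 16_000_000_000 // EJECTION_BALANCE
	minActivationGwei   = 32_000_000_000 // MIN_ACTIVATION_BALANCE
)

// simpleValidator is a stripped-down stand-in for the spec's Validator container.
type simpleValidator struct {
	effectiveBalance           uint64
	activationEligibilityEpoch uint64
	activationEpoch            uint64
	exitEpoch                  uint64
}

func isActive(v simpleValidator, epoch uint64) bool {
	return v.activationEpoch <= epoch && epoch < v.exitEpoch
}

func computeActivationExitEpoch(epoch uint64) uint64 {
	return epoch + 1 + maxSeedLookahead
}

// processRegistryUpdates follows the spec pseudocode above on the simplified model.
// The real initiate_validator_exit also spreads exits across the exit churn, which is
// intentionally omitted here.
func processRegistryUpdates(validators []simpleValidator, currentEpoch, finalizedEpoch uint64) {
	for i := range validators {
		v := &validators[i]
		// Queue validators that have just become eligible for activation.
		if v.activationEligibilityEpoch == farFutureEpoch && v.effectiveBalance >= minActivationGwei {
			v.activationEligibilityEpoch = currentEpoch + 1
		}
		// Eject active validators whose effective balance fell to the ejection floor.
		if isActive(*v, currentEpoch) && v.effectiveBalance <= ejectionBalanceGwei && v.exitEpoch == farFutureEpoch {
			v.exitEpoch = computeActivationExitEpoch(currentEpoch)
		}
	}
	// Activate every validator whose eligibility epoch has been finalized.
	activationEpoch := computeActivationExitEpoch(currentEpoch)
	for i := range validators {
		v := &validators[i]
		if v.activationEpoch == farFutureEpoch && v.activationEligibilityEpoch <= finalizedEpoch {
			v.activationEpoch = activationEpoch
		}
	}
}

func main() {
	vals := []simpleValidator{
		{effectiveBalance: minActivationGwei, activationEligibilityEpoch: farFutureEpoch, activationEpoch: farFutureEpoch, exitEpoch: farFutureEpoch},
		{effectiveBalance: ejectionBalanceGwei, activationEligibilityEpoch: 1, activationEpoch: 2, exitEpoch: farFutureEpoch},
	}
	processRegistryUpdates(vals, 10, 9)
	fmt.Printf("%+v\n", vals) // first validator queued for activation, second scheduled to exit at epoch 15
}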

Some files were not shown because too many files have changed in this diff.