Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: 38 commits between fast-confi… and nested-blo…
| SHA1 |
|---|
| 6b49be1eef |
| 1272b9e186 |
| fcbe19445a |
| 2b4dffa87d |
| 49a6d02e12 |
| 2b06dfd4a3 |
| 6e81b4e84b |
| 0de1282e1c |
| e3db52ca1f |
| c5a36d4c70 |
| e28b6695ba |
| de177f74fb |
| e4310aef73 |
| d71079e1d8 |
| c08d2f36b0 |
| 839a80e339 |
| a35535043e |
| 323dd7b22d |
| 102128ca2e |
| f3dd75a2c4 |
| 0869814a0e |
| 41edee9fe9 |
| 2fa3694746 |
| e9606b3635 |
| ed7c4bb6a7 |
| c93fea4ec4 |
| aa847991e0 |
| 5f1b903bdf |
| 49f3531aed |
| 9b2934f1f6 |
| 26355768a0 |
| 80bff0dc2d |
| c312a88aa3 |
| 625818d556 |
| 2c5a2e8ec7 |
| ae16d5f52c |
| d69be8a766 |
| 8df62a537b |
@@ -12,8 +12,7 @@
build:remote-cache --remote_download_minimal
build:remote-cache --remote_build_event_upload=minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache

MODULE.bazel.lock (generated, 21 changed lines)

@@ -1123,6 +1123,27 @@
      "recordedRepoMappingEntries": []
    }
  },
  "@@bazel_tools//tools/test:extensions.bzl%remote_coverage_tools_extension": {
    "general": {
      "bzlTransitiveDigest": "l5mcjH2gWmbmIycx97bzI2stD0Q0M5gpDc0aLOHKIm8=",
      "recordedFileInputs": {},
      "recordedDirentsInputs": {},
      "envVariables": {},
      "generatedRepoSpecs": {
        "remote_coverage_tools": {
          "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl",
          "ruleClassName": "http_archive",
          "attributes": {
            "sha256": "7006375f6756819b7013ca875eab70a541cf7d89142d9c511ed78ea4fefa38af",
            "urls": [
              "https://mirror.bazel.build/bazel_coverage_output_generator/releases/coverage_output_generator-v2.6.zip"
            ]
          }
        }
      },
      "recordedRepoMappingEntries": []
    }
  },
  "@@rules_java~//java:extensions.bzl%toolchains": {
    "general": {
      "bzlTransitiveDigest": "tJHbmWnq7m+9eUBnUdv7jZziQ26FmcGL9C5/hU3Q9UQ=",

WORKSPACE (10 changed lines)

@@ -227,7 +227,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.5.0-alpha.0"
consensus_spec_version = "v1.5.0-alpha.2"

bls_test_version = "v0.1.1"

@@ -243,7 +243,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "33c5547772b6d8d6f041dff7e7d26b0358c2392daed34394a3aa81147812a81c",
    integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -259,7 +259,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "06f286199cf2fedd4700487fb8feb0904e0ae18daaa4b3f70ea430ca9c388167",
    integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -275,7 +275,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "5f2a4452b323075eba6bf950003f7d91fd04ebcbde5bd087beafb5d6f6325ad4",
    integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -290,7 +290,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "fd7e83e8cbeb3e297f2aeb93776305f7d606272c97834d8d9be673984501ed36",
    integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

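The WORKSPACE hunks above replace bare `sha256` hex digests with `integrity` attributes for the bumped v1.5.0-alpha.2 spec archives. Bazel's `integrity` attribute takes a Subresource Integrity string: the algorithm name, a dash, and the base64 encoding of the raw digest bytes, so it is not simply the old hex value re-labelled. A small Go sketch of that conversion; the file name is a placeholder and not part of the diff:

```go
// Compute a Bazel-style integrity value ("sha256-<base64 of raw digest>")
// for a locally downloaded archive. Hypothetical helper, not Prysm code.
package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "os"
)

func main() {
    data, err := os.ReadFile("mainnet.tar.gz") // placeholder file name
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    sum := sha256.Sum256(data)
    fmt.Println("sha256-" + base64.StdEncoding.EncodeToString(sum[:]))
}
```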
@@ -11,6 +11,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/builder",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types:go_default_library",
@@ -28,6 +29,7 @@ go_library(
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

@@ -49,6 +51,7 @@ go_test(
        "//math:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

@@ -6,7 +6,6 @@ import (
    "encoding/json"
    "fmt"
    "io"
    "math/big"
    "net"
    "net/http"
    "net/url"
@@ -14,11 +13,11 @@ import (
    "text/template"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/api"
    "github.com/prysmaticlabs/prysm/v5/api/server/structs"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
    v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -282,133 +281,68 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
    return err
}

var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")

// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
    if !sb.IsBlinded() {
        return nil, nil, errNotBlinded
    }
    switch sb.Version() {
    case version.Bellatrix:
        psb, err := sb.PbBlindedBellatrixBlock()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not get protobuf block")
        }
        b, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(&ethpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
        }
        body, err := json.Marshal(b)
        if err != nil {
            return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
        }
        versionOpt := func(r *http.Request) {
            r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
            r.Header.Set("Content-Type", "application/json")
            r.Header.Set("Accept", "application/json")
        }
        rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)

        if err != nil {
            return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
        }
        ep := &ExecPayloadResponse{}
        if err := json.Unmarshal(rb, ep); err != nil {
            return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
        }
        if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
            return nil, nil, errors.New("not a bellatrix payload")
        }
        p, err := ep.ToProto()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
        }
        payload, err := blocks.WrappedExecutionPayload(p)
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
        }
        return payload, nil, nil
    case version.Capella:
        psb, err := sb.PbBlindedCapellaBlock()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not get protobuf block")
        }
        b, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(&ethpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
        }
        body, err := json.Marshal(b)
        if err != nil {
            return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
        }
        versionOpt := func(r *http.Request) {
            r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
            r.Header.Set("Content-Type", "application/json")
            r.Header.Set("Accept", "application/json")
        }
        rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)

        if err != nil {
            return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
        }
        ep := &ExecPayloadResponseCapella{}
        if err := json.Unmarshal(rb, ep); err != nil {
            return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
        }
        if strings.ToLower(ep.Version) != version.String(version.Capella) {
            return nil, nil, errors.New("not a capella payload")
        }
        p, err := ep.ToProto()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
        }
        payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
        }
        return payload, nil, nil
    case version.Deneb:
        psb, err := sb.PbBlindedDenebBlock()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not get protobuf block")
        }
        b, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
        }
        body, err := json.Marshal(b)
        if err != nil {
            return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
        }

        versionOpt := func(r *http.Request) {
            r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
            r.Header.Set("Content-Type", "application/json")
            r.Header.Set("Accept", "application/json")
        }
        rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
        if err != nil {
            return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
        }
        ep := &ExecPayloadResponseDeneb{}
        if err := json.Unmarshal(rb, ep); err != nil {
            return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
        }
        if strings.ToLower(ep.Version) != version.String(version.Deneb) {
            return nil, nil, errors.New("not a deneb payload")
        }
        p, blobBundle, err := ep.ToProto()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
        }
        payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
        }
        return payload, blobBundle, nil
    default:
        return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
    // massage the proto struct type data into the api response type.
    mj, err := structs.SignedBeaconBlockMessageJsoner(sb)
    if err != nil {
        return nil, nil, errors.Wrap(err, "error generating blinded beacon block post request")
    }

    body, err := json.Marshal(mj)
    if err != nil {
        return nil, nil, errors.Wrap(err, "error marshaling blinded block post request to json")
    }
    postOpts := func(r *http.Request) {
        r.Header.Add("Eth-Consensus-Version", version.String(sb.Version()))
        r.Header.Set("Content-Type", api.JsonMediaType)
        r.Header.Set("Accept", api.JsonMediaType)
    }
    // post the blinded block - the execution payload response should contain the unblinded payload, along with the
    // blobs bundle if it is post deneb.
    rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
    if err != nil {
        return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
    }
    // ExecutionPayloadResponse parses just the outer container and the Value key, enabling it to use the .Value
    // key to determine which underlying data type to use to finish the unmarshaling.
    ep := &ExecutionPayloadResponse{}
    if err := json.Unmarshal(rb, ep); err != nil {
        return nil, nil, errors.Wrap(err, "error unmarshaling the builder ExecutionPayloadResponse")
    }
    if strings.ToLower(ep.Version) != version.String(sb.Version()) {
        return nil, nil, errors.Wrapf(errResponseVersionMismatch, "req=%s, recv=%s", strings.ToLower(ep.Version), version.String(sb.Version()))
    }
    // This parses the rest of the response and returns the inner data field.
    pp, err := ep.ParsePayload()
    if err != nil {
        return nil, nil, errors.Wrapf(err, "failed to parse execution payload from builder with version=%s", ep.Version)
    }
    // Get the payload as a proto.Message so it can be wrapped as an execution payload interface.
    pb, err := pp.PayloadProto()
    if err != nil {
        return nil, nil, err
    }
    ed, err := blocks.NewWrappedExecutionData(pb, nil)
    if err != nil {
        return nil, nil, err
    }
    bb, ok := pp.(BlobBundler)
    if ok {
        bbpb, err := bb.BundleProto()
        if err != nil {
            return nil, nil, errors.Wrapf(err, "failed to extract blobs bundle from builder response with version=%s", ep.Version)
        }
        return ed, bbpb, nil
    }
    return ed, nil, nil
}

// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy

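For orientation, this is roughly how a caller sits on top of the new version-agnostic SubmitBlindedBlock; the surrounding setup (where the client and the signed blinded block come from) is assumed rather than taken from this diff:

```go
// Sketch only: `c` is an existing builder client and `blinded` a signed
// blinded block; only the call signature below is grounded in the diff.
package validatorflow

import (
    "context"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/api/client/builder"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

func unblindViaBuilder(ctx context.Context, c *builder.Client, blinded interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
    payload, bundle, err := c.SubmitBlindedBlock(ctx, blinded)
    if err != nil {
        // A builder answering with a different Eth-Consensus-Version than the
        // block's fork surfaces here as a wrapped errResponseVersionMismatch.
        return nil, nil, errors.Wrap(err, "builder could not unblind the block")
    }
    // bundle is nil for pre-Deneb forks; payload is the full execution payload.
    return payload, bundle, nil
}
```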
@@ -432,7 +432,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        _, _, err = c.SubmitBlindedBlock(ctx, sbbb)
        require.ErrorContains(t, "not a bellatrix payload", err)
        require.ErrorIs(t, err, errResponseVersionMismatch)
    })
    t.Run("not blinded", func(t *testing.T) {
        sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})

@@ -9,11 +9,15 @@ import (
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/math"
    v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "google.golang.org/protobuf/proto"
)

var errInvalidUint256 = errors.New("invalid Uint256")
@@ -44,6 +48,9 @@ func sszBytesToUint256(b []byte) (Uint256, error) {

// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
func (s Uint256) SSZBytes() []byte {
    if s.Int == nil {
        s.Int = big.NewInt(0)
    }
    if !math.IsValidUint256(s.Int) {
        return []byte{}
    }
@@ -91,6 +98,9 @@ func (s Uint256) MarshalJSON() ([]byte, error) {

// MarshalText returns a text byte representation of Uint256.
func (s Uint256) MarshalText() ([]byte, error) {
    if s.Int == nil {
        s.Int = big.NewInt(0)
    }
    if !math.IsValidUint256(s.Int) {
        return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
    }
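The nil guards added to SSZBytes and MarshalText mean a zero-value Uint256, whose embedded *big.Int is still nil, is treated as zero instead of falling straight into the validity check. A quick sketch of the expected behaviour; the printed results follow from the guard and are not asserted anywhere in this diff:

```go
// Zero-value Uint256 handling after the nil guards. Sketch only.
package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/api/client/builder"
)

func main() {
    var v builder.Uint256 // zero value: v.Int == nil

    txt, err := v.MarshalText()
    fmt.Println(string(txt), err) // expected: "0" <nil>

    // Expected: the fixed-size little-endian encoding of zero, not an empty slice.
    fmt.Println(len(v.SSZBytes()))
}
```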
@@ -265,6 +275,11 @@ func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
    return r.Data.ToProto()
}

func (r *ExecutionPayload) PayloadProto() (proto.Message, error) {
    pb, err := r.ToProto()
    return pb, err
}

// ToProto returns a ExecutionPayload Proto
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
    txs := make([][]byte, len(p.Transactions))
@@ -396,6 +411,51 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
    }, nil
}

var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")

// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
    pb := ed.Proto()
    var data interface{}
    var err error
    var ver string
    switch pbStruct := pb.(type) {
    case *v1.ExecutionPayload:
        ver = version.String(version.Bellatrix)
        data, err = FromProto(pbStruct)
        if err != nil {
            return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
        }
    case *v1.ExecutionPayloadCapella:
        ver = version.String(version.Capella)
        data, err = FromProtoCapella(pbStruct)
        if err != nil {
            return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
        }
    case *v1.ExecutionPayloadDeneb:
        ver = version.String(version.Deneb)
        payloadStruct, err := FromProtoDeneb(pbStruct)
        if err != nil {
            return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
        }
        data = &ExecutionPayloadDenebAndBlobsBundle{
            ExecutionPayload: &payloadStruct,
            BlobsBundle:      FromBundleProto(bundle),
        }
    default:
        return nil, errInvalidTypeConversion
    }
    encoded, err := json.Marshal(data)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
    }
    return &ExecutionPayloadResponse{
        Version: ver,
        Data:    encoded,
    }, nil
}

// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
type ExecHeaderResponseCapella struct {
    Data struct {
@@ -523,6 +583,42 @@ type ExecPayloadResponseCapella struct {
    Data ExecutionPayloadCapella `json:"data"`
}

// ExecutionPayloadResponse allows for unmarshaling just the Version field of the payload.
// This allows it to return different ExecutionPayload types based on the version field.
type ExecutionPayloadResponse struct {
    Version string          `json:"version"`
    Data    json.RawMessage `json:"data"`
}

// ParsedPayload can retrieve the underlying protobuf message for the given execution payload response.
type ParsedPayload interface {
    PayloadProto() (proto.Message, error)
}

// BlobBundler can retrieve the underlying blob bundle protobuf message for the given execution payload response.
type BlobBundler interface {
    BundleProto() (*v1.BlobsBundle, error)
}

func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
    var toProto ParsedPayload
    switch r.Version {
    case version.String(version.Bellatrix):
        toProto = &ExecutionPayload{}
    case version.String(version.Capella):
        toProto = &ExecutionPayloadCapella{}
    case version.String(version.Deneb):
        toProto = &ExecutionPayloadDenebAndBlobsBundle{}
    default:
        return nil, consensusblocks.ErrUnsupportedVersion
    }

    if err := json.Unmarshal(r.Data, toProto); err != nil {
        return nil, errors.Wrap(err, "failed to unmarshal the response .Data field with the stated version schema")
    }
    return toProto, nil
}

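ExecutionPayloadResponse plus ParsePayload is the piece that lets SubmitBlindedBlock stay fork-agnostic: the envelope is decoded once for its `version`, then the raw `data` field is decoded a second time with the schema that matches. A hand-rolled sketch of that two-step decode; the JSON literal is illustrative, not a captured builder response:

```go
// Decode a builder response envelope by version, mirroring what the client
// now does internally. Sketch only.
package main

import (
    "encoding/json"
    "log"

    "github.com/prysmaticlabs/prysm/v5/api/client/builder"
)

func main() {
    raw := []byte(`{"version":"capella","data":{}}`)

    ep := &builder.ExecutionPayloadResponse{}
    if err := json.Unmarshal(raw, ep); err != nil {
        log.Fatal(err)
    }
    pp, err := ep.ParsePayload() // picks ExecutionPayloadCapella based on "version"
    if err != nil {
        log.Fatal(err) // unknown versions return consensusblocks.ErrUnsupportedVersion
    }
    pb, err := pp.PayloadProto() // proto.Message; may be ErrNilObject for empty Deneb data
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("decoded payload type %T", pb)
    if bb, ok := pp.(builder.BlobBundler); ok {
        _, _ = bb.BundleProto() // Deneb responses also carry a blobs bundle
    }
}
```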
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
type ExecutionPayloadCapella struct {
    ParentHash hexutil.Bytes `json:"parent_hash"`
@@ -547,6 +643,11 @@ func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, err
    return r.Data.ToProto()
}

func (p *ExecutionPayloadCapella) PayloadProto() (proto.Message, error) {
    pb, err := p.ToProto()
    return pb, err
}

// ToProto returns a ExecutionPayloadCapella Proto.
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
    txs := make([][]byte, len(p.Transactions))
@@ -1128,6 +1229,12 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
    if r.Data == nil {
        return nil, nil, errors.New("data field in response is empty")
    }
    if r.Data.ExecutionPayload == nil {
        return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
    }
    if r.Data.BlobsBundle == nil {
        return nil, nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
    }
    payload, err := r.Data.ExecutionPayload.ToProto()
    if err != nil {
        return nil, nil, err
@@ -1139,8 +1246,26 @@ func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.Blo
    return payload, bundle, nil
}

func (r *ExecutionPayloadDenebAndBlobsBundle) PayloadProto() (proto.Message, error) {
    if r.ExecutionPayload == nil {
        return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload in combined deneb payload")
    }
    pb, err := r.ExecutionPayload.ToProto()
    return pb, err
}

func (r *ExecutionPayloadDenebAndBlobsBundle) BundleProto() (*v1.BlobsBundle, error) {
    if r.BlobsBundle == nil {
        return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil blobs bundle")
    }
    return r.BlobsBundle.ToProto()
}

// ToProto returns the ExecutionPayloadDeneb Proto.
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
    if p == nil {
        return nil, errors.Wrap(consensusblocks.ErrNilObject, "nil execution payload")
    }
    txs := make([][]byte, len(p.Transactions))
    for i := range p.Transactions {
        txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])

@@ -15,9 +15,11 @@ import (
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
@@ -1600,7 +1602,6 @@ func TestBuilderBidUnmarshalUint256(t *testing.T) {
|
||||
require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
|
||||
r := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testBuilderBid), r))
|
||||
//require.Equal(t, expectedValue, r.Data.Message.Value)
|
||||
marshaled := r.Data.Message.Value.String()
|
||||
require.Equal(t, base10, marshaled)
|
||||
require.Equal(t, 0, expectedValue.Cmp(r.Data.Message.Value.Int))
|
||||
@@ -1907,3 +1908,40 @@ func TestErrorMessage_non200Err(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyResponseBody(t *testing.T) {
|
||||
t.Run("empty buffer", func(t *testing.T) {
|
||||
var b []byte
|
||||
r := &ExecutionPayloadResponse{}
|
||||
err := json.Unmarshal(b, r)
|
||||
_, ok := err.(*json.SyntaxError)
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
t.Run("empty object", func(t *testing.T) {
|
||||
empty := []byte("{}")
|
||||
emptyResponse := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(empty, emptyResponse))
|
||||
_, err := emptyResponse.ParsePayload()
|
||||
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
|
||||
})
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
|
||||
for i := range versions {
|
||||
vstr := version.String(versions[i])
|
||||
t.Run("populated version without payload"+vstr, func(t *testing.T) {
|
||||
in := &ExecutionPayloadResponse{Version: vstr}
|
||||
encoded, err := json.Marshal(in)
|
||||
require.NoError(t, err)
|
||||
epr := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(encoded, epr))
|
||||
pp, err := epr.ParsePayload()
|
||||
require.NoError(t, err)
|
||||
pb, err := pp.PayloadProto()
|
||||
if err == nil {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, pb == nil)
|
||||
} else {
|
||||
require.ErrorIs(t, err, consensusblocks.ErrNilObject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
        "//api/server:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//container/slice:go_default_library",

@@ -1,10 +1,34 @@
package structs

import "encoding/json"

// MessageJsoner describes a signed consensus type wrapper that can return the `.Message` field in a json envelope
// encoded as a []byte, for use as a json.RawMessage value when encoding the outer envelope.
type MessageJsoner interface {
    MessageRawJson() ([]byte, error)
}

// SignedMessageJsoner embeds MessageJsoner and adds a method to also retrieve the Signature field as a string.
type SignedMessageJsoner interface {
    MessageJsoner
    SigString() string
}

type SignedBeaconBlock struct {
    Message   *BeaconBlock `json:"message"`
    Signature string       `json:"signature"`
}

var _ SignedMessageJsoner = &SignedBeaconBlock{}

func (s *SignedBeaconBlock) MessageRawJson() ([]byte, error) {
    return json.Marshal(s.Message)
}

func (s *SignedBeaconBlock) SigString() string {
    return s.Signature
}

type BeaconBlock struct {
    Slot          string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
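SignedMessageJsoner is what lets the builder client serialize any fork's signed or blinded block through one code path: the wrapper hands back its `message` as raw JSON and its signature as a string, and the caller reassembles the standard signed envelope. A small illustrative helper, not part of the diff, built only on the two interface methods:

```go
// Rebuild the canonical {"message": ..., "signature": "0x..."} envelope from
// any SignedMessageJsoner. Illustrative helper, not Prysm code.
package structsutil

import (
    "encoding/json"

    "github.com/prysmaticlabs/prysm/v5/api/server/structs"
)

func signedEnvelopeJSON(s structs.SignedMessageJsoner) ([]byte, error) {
    msg, err := s.MessageRawJson()
    if err != nil {
        return nil, err
    }
    return json.Marshal(struct {
        Message   json.RawMessage `json:"message"`
        Signature string          `json:"signature"`
    }{Message: msg, Signature: s.SigString()})
}
```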
@@ -29,6 +53,16 @@ type SignedBeaconBlockAltair struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockAltair{}
|
||||
|
||||
func (s *SignedBeaconBlockAltair) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockAltair) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockAltair struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -54,6 +88,16 @@ type SignedBeaconBlockBellatrix struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockBellatrix{}
|
||||
|
||||
func (s *SignedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockBellatrix) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockBellatrix struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -80,6 +124,16 @@ type SignedBlindedBeaconBlockBellatrix struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockBellatrix{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockBellatrix) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockBellatrix) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -106,6 +160,16 @@ type SignedBeaconBlockCapella struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockCapella{}
|
||||
|
||||
func (s *SignedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockCapella) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockCapella struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -133,6 +197,16 @@ type SignedBlindedBeaconBlockCapella struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockCapella{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockCapella) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockCapella) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockCapella struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -172,6 +246,16 @@ type SignedBeaconBlockDeneb struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockDeneb{}
|
||||
|
||||
func (s *SignedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockDeneb) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockDeneb struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -208,6 +292,16 @@ type SignedBlindedBeaconBlockDeneb struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockDeneb{}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockDeneb) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockDeneb) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyDeneb struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
|
||||
@@ -6,8 +6,10 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -15,6 +17,8 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var ErrUnsupportedConversion = errors.New("Could not determine api struct type to use for value")
|
||||
|
||||
func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, error) {
|
||||
msg, err := h.Message.ToConsensus()
|
||||
if err != nil {
|
||||
@@ -1852,7 +1856,34 @@ func BeaconBlockFromConsensus(b *eth.BeaconBlock) *BeaconBlock {
|
||||
}
|
||||
}
|
||||
|
||||
func SignedBeaconBlockFromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
|
||||
func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock) (SignedMessageJsoner, error) {
|
||||
pb, err := block.Proto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch pbStruct := pb.(type) {
|
||||
case *eth.SignedBeaconBlock:
|
||||
return SignedBeaconBlockPhase0FromConsensus(pbStruct), nil
|
||||
case *eth.SignedBeaconBlockAltair:
|
||||
return SignedBeaconBlockAltairFromConsensus(pbStruct), nil
|
||||
case *eth.SignedBlindedBeaconBlockBellatrix:
|
||||
return SignedBlindedBeaconBlockBellatrixFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockBellatrix:
|
||||
return SignedBeaconBlockBellatrixFromConsensus(pbStruct)
|
||||
case *eth.SignedBlindedBeaconBlockCapella:
|
||||
return SignedBlindedBeaconBlockCapellaFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockCapella:
|
||||
return SignedBeaconBlockCapellaFromConsensus(pbStruct)
|
||||
case *eth.SignedBlindedBeaconBlockDeneb:
|
||||
return SignedBlindedBeaconBlockDenebFromConsensus(pbStruct)
|
||||
case *eth.SignedBeaconBlockDeneb:
|
||||
return SignedBeaconBlockDenebFromConsensus(pbStruct)
|
||||
default:
|
||||
return nil, ErrUnsupportedConversion
|
||||
}
|
||||
}
|
||||
|
||||
func SignedBeaconBlockPhase0FromConsensus(b *eth.SignedBeaconBlock) *SignedBeaconBlock {
|
||||
return &SignedBeaconBlock{
|
||||
Message: BeaconBlockFromConsensus(b.Block),
|
||||
Signature: hexutil.Encode(b.Signature),
|
||||
|
||||
@@ -325,7 +325,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
    var attr payloadattribute.Attributer
    switch st.Version() {
    case version.Deneb:
        withdrawals, err := st.ExpectedWithdrawals()
        withdrawals, _, err := st.ExpectedWithdrawals()
        if err != nil {
            log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
            return emptyAttri
@@ -342,7 +342,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
            return emptyAttri
        }
    case version.Capella:
        withdrawals, err := st.ExpectedWithdrawals()
        withdrawals, _, err := st.ExpectedWithdrawals()
        if err != nil {
            log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
            return emptyAttri

@@ -401,7 +401,7 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
|
||||
}
|
||||
for _, a := range orphanedBlk.Block().Body().Attestations() {
|
||||
// if the attestation is one epoch older, it wouldn't been useful to save it.
|
||||
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
continue
|
||||
}
|
||||
if helpers.IsAggregated(a) {
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
@@ -312,14 +313,14 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []interfaces.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
@@ -389,14 +390,14 @@ func TestSaveOrphanedOps(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []interfaces.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
|
||||
@@ -517,14 +518,14 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
wantAtts := []interfaces.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
return atts[i].GetData().Slot > atts[j].GetData().Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
@@ -369,6 +369,6 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
|
||||
|
||||
func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
|
||||
for _, att := range blk.Body().Attestations() {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.GetData().Slot))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
@@ -36,17 +37,17 @@ import (
|
||||
//
|
||||
// # Update latest messages for attesting indices
|
||||
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
|
||||
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
|
||||
func (s *Service) OnAttestation(ctx context.Context, a interfaces.Attestation, disparity time.Duration) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.GetData()); err != nil {
|
||||
return err
|
||||
}
|
||||
tgt := ethpb.CopyCheckpoint(a.Data.Target)
|
||||
tgt := ethpb.CopyCheckpoint(a.GetData().Target)
|
||||
|
||||
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
@@ -67,7 +68,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
if err := s.verifyBeaconBlock(ctx, a.GetData()); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
@@ -75,12 +76,12 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
|
||||
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the target state to verify attesting indices are valid.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -97,7 +98,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, dispa
|
||||
// We assume trusted attestation in this function has verified signature.
|
||||
|
||||
// Update forkchoice store with the new attestation for updating weight.
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
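The hunks above and below swap the concrete *ethpb.Attestation for an interfaces.Attestation abstraction, so every field access becomes an accessor call. The diff shows only the call sites, not the interface itself; inferred from those call sites it presumably looks roughly like the sketch below, which is an assumption rather than the real definition in consensus-types/interfaces:

```go
package interfacesketch

import (
    "github.com/prysmaticlabs/go-bitfield"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// Hypothetical shape, inferred only from the accessors this diff calls
// (GetData, GetAggregationBits); the real interfaces.Attestation may differ.
type Attestation interface {
    GetAggregationBits() bitfield.Bitlist
    GetData() *ethpb.AttestationData
}

// Call sites then migrate mechanically, for example:
//   a.Data.Slot               ->  a.GetData().Slot
//   a.AggregationBits.Count() ->  a.GetAggregationBits().Count()
```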
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -73,7 +74,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
a interfaces.Attestation
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -366,17 +366,17 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
|
||||
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
|
||||
// Feed in block's attestations to fork choice store.
|
||||
for _, a := range blk.Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(a, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
if s.cfg.ForkChoiceStore.HasNode(r) {
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
|
||||
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -387,7 +387,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
|
||||
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
||||
// This function requires a write lock on forkchoice.
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []interfaces.AttesterSlashing) {
|
||||
for _, slashing := range slashings {
|
||||
indices := blocks.SlashableAttesterIndices(slashing)
|
||||
for _, index := range indices {
|
||||
|
||||
@@ -824,7 +824,10 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
|
||||
|
||||
atts := b.Block.Body.Attestations
|
||||
atts := make([]interfaces.Attestation, len(b.Block.Body.Attestations))
|
||||
for i, a := range b.Block.Body.Attestations {
|
||||
atts[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
@@ -2010,12 +2013,12 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a := wsb.Block().Body().Attestations()[0]
|
||||
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
|
||||
|
||||
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
|
||||
a3 := wsb3.Block().Body().Attestations()[0]
|
||||
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
|
||||
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
|
||||
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
|
||||
|
||||
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -31,7 +32,7 @@ type AttestationStateFetcher interface {
|
||||
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
|
||||
type AttestationReceiver interface {
|
||||
AttestationStateFetcher
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
|
||||
VerifyLmdFfgConsistency(ctx context.Context, att interfaces.Attestation) error
|
||||
InForkchoice([32]byte) bool
|
||||
}
|
||||
|
||||
@@ -51,13 +52,13 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a interfaces.Attestation) error {
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(a.Data.Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
|
||||
if !bytes.Equal(a.GetData().Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.GetData().BeaconBlockRoot, a.GetData().Target.Root, r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -170,13 +171,13 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
// Based on the spec, don't process the attestation until the subsequent slot.
|
||||
// This delays consideration in the fork choice until their slot is in the past.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
|
||||
nextSlot := a.Data.Slot + 1
|
||||
nextSlot := a.GetData().Slot + 1
|
||||
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
|
||||
if !(hasState && hasBlock) {
|
||||
continue
|
||||
}
|
||||
@@ -185,17 +186,17 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
log.WithError(err).Error("Could not delete fork choice attestation in pool")
|
||||
}
|
||||
|
||||
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
|
||||
if !helpers.VerifyCheckpointEpoch(a.GetData().Target, s.genesisTime) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": a.Data.Slot,
|
||||
"committeeIndex": a.Data.CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
|
||||
"aggregationCount": a.AggregationBits.Count(),
|
||||
"slot": a.GetData().Slot,
|
||||
"committeeIndex": a.GetData().CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
|
||||
"aggregationCount": a.GetAggregationBits().Count(),
|
||||
}).WithError(err).Warn("Could not process attestation for fork choice")
|
||||
}
|
||||
}
|
||||
@@ -206,7 +207,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
|
||||
// 1. Validate attestation, update validator's latest vote
|
||||
// 2. Apply fork choice to the processed attestation
|
||||
// 3. Save latest head info
|
||||
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation, disparity time.Duration) error {
|
||||
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att interfaces.Attestation, disparity time.Duration) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -83,7 +84,11 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
attsToSave := make([]interfaces.Attestation, len(atts))
|
||||
for i, a := range atts {
|
||||
attsToSave[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
|
||||
service.processAttestations(ctx, 0)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
|
||||
@@ -121,7 +126,11 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
// Generate attestations for this block in Slot 1
|
||||
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
attsToSave := make([]interfaces.Attestation, len(atts))
|
||||
for i, a := range atts {
|
||||
attsToSave[i] = a
|
||||
}
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
|
||||
// Verify the target is in forkchoice
|
||||
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
|
||||
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
|
||||
|
||||
@@ -52,7 +52,7 @@ type BlobReceiver interface {
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing interfaces.AttesterSlashing)
|
||||
}
|
||||
|
||||
// ReceiveBlock is a function that defines the operations (minus pubsub)
|
||||
@@ -295,10 +295,10 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing interfaces.AttesterSlashing) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []interfaces.AttesterSlashing{slashing})
|
||||
}
|
||||
|
||||
// prunePostBlockOperationPools only runs on new head otherwise should return a nil.
|
||||
@@ -479,7 +479,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
ctx := context.TODO()
|
||||
for _, att := range signed.Block().Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committee")
|
||||
return
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -49,7 +50,7 @@ func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
|
||||
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ interfaces.Attestation) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -414,8 +414,8 @@ func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
|
||||
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
|
||||
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a interfaces.Attestation) error {
|
||||
if !bytes.Equal(a.GetData().BeaconBlockRoot, a.GetData().Target.Root) {
|
||||
return errors.New("LMD and FFG miss matched")
|
||||
}
|
||||
return nil
|
||||
@@ -495,7 +495,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing mocks the same method in the chain service.
|
||||
func (*ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
|
||||
func (*ChainService) ReceiveAttesterSlashing(context.Context, interfaces.AttesterSlashing) {}
|
||||
|
||||
// IsFinalized mocks the same method in the chain service.
|
||||
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
|
||||
|
||||
@@ -48,7 +48,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
att *ethpb.Attestation,
|
||||
att interfaces.Attestation,
|
||||
totalBalance uint64,
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
|
||||
@@ -58,24 +58,24 @@ func ProcessAttestationNoVerifySignature(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
delay, err := beaconState.Slot().SafeSubSlot(att.Data.Slot)
|
||||
delay, err := beaconState.Slot().SafeSubSlot(att.GetData().Slot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.Data.Slot, beaconState.Slot())
|
||||
return nil, fmt.Errorf("att slot %d can't be greater than state slot %d", att.GetData().Slot, beaconState.Slot())
|
||||
}
|
||||
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.Data, delay)
|
||||
participatedFlags, err := AttestationParticipationFlagIndices(beaconState, att.GetData(), delay)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return SetParticipationAndRewardProposer(ctx, beaconState, att.Data.Target.Epoch, indices, participatedFlags, totalBalance)
|
||||
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
|
||||
}
|
||||
|
||||
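AttestingIndices now takes the whole attestation rather than just its aggregation bits, which lets the helper also handle Electra-style attestations that carry committee bits. For reference, the pre-Electra semantics are simply the committee members whose aggregation bit is set; the sketch below restates that definition only and is not the actual helper implementation (package and function names are illustrative).

package example // illustrative only

import (
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// attestingIndicesPhase0 returns the indices of committee members whose aggregation
// bit is set, i.e. the pre-Electra notion of attesting indices.
func attestingIndicesPhase0(bits bitfield.Bitlist, committee []primitives.ValidatorIndex) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, vIdx := range committee {
		if bits.BitAt(uint64(i)) {
			indices = append(indices, uint64(vIdx))
		}
	}
	return indices
}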
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
|
||||
|
||||
@@ -215,7 +215,7 @@ func TestProcessAttestations_OK(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
@@ -273,7 +273,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
has, err := altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex)
|
||||
|
||||
@@ -158,7 +158,7 @@ func TranslateParticipation(ctx context.Context, state state.BeaconState, atts [
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, s, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(pendingAtts[0], committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
has, err := altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex)
|
||||
|
||||
@@ -46,7 +46,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
func VerifyAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
att *ethpb.Attestation,
|
||||
att interfaces.Attestation,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
@@ -56,7 +56,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
}
|
||||
currEpoch := time.CurrentEpoch(beaconState)
|
||||
prevEpoch := time.PrevEpoch(beaconState)
|
||||
data := att.Data
|
||||
data := att.GetData()
|
||||
if data.Target.Epoch != prevEpoch && data.Target.Epoch != currEpoch {
|
||||
return fmt.Errorf(
|
||||
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
|
||||
@@ -76,11 +76,11 @@ func VerifyAttestationNoVerifySignature(
|
||||
}
|
||||
}
|
||||
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.GetData()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := att.Data.Slot
|
||||
s := att.GetData().Slot
|
||||
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
|
||||
if !minInclusionCheck {
|
||||
return fmt.Errorf(
|
||||
@@ -102,27 +102,58 @@ func VerifyAttestationNoVerifySignature(
|
||||
)
|
||||
}
|
||||
}
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.Data.Target.Epoch)
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.GetData().Target.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c := helpers.SlotCommitteeCount(activeValidatorCount)
|
||||
if uint64(att.Data.CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.Data.CommitteeIndex, c)
|
||||
}
|
||||
|
||||
if err := helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
var indexedAtt ethpb.IndexedAtt
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
if att.Version() < version.Electra {
|
||||
if uint64(att.GetData().CommitteeIndex) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
|
||||
}
|
||||
|
||||
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if att.GetData().CommitteeIndex != 0 {
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
|
||||
|
||||
committeeIndices := att.GetCommitteeBitsVal().BitIndices()
|
||||
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
|
||||
participantsCount := 0
|
||||
var err error
|
||||
for i, ci := range committeeIndices {
|
||||
if uint64(ci) >= c {
|
||||
return fmt.Errorf("committee index %d >= committee count %d", ci, c)
|
||||
}
|
||||
committees[i], err = helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, primitives.CommitteeIndex(ci))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
participantsCount += len(committees[i])
|
||||
}
|
||||
if att.GetAggregationBits().Len() != uint64(participantsCount) {
|
||||
return fmt.Errorf("aggregation bits count %d is different than participant count %d", att.GetAggregationBits().Len(), participantsCount)
|
||||
}
|
||||
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committees...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
|
||||
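The post-Electra branch above replaces the single committee-index check with one driven by the committee bits: each set bit must select a valid committee for the slot, and the aggregation bitlist must cover the combined size of the selected committees. The standalone sketch below restates that length check outside the beacon-state plumbing; the function name and committeeSizes parameter are illustrative only.

package example // illustrative only

import "fmt"

// checkElectraAggregationLength mirrors the check above: committeeSizes holds the
// length of every committee selected by the attestation's committee bits, and the
// aggregation bitlist length must equal their sum.
func checkElectraAggregationLength(aggregationBitsLen uint64, committeeSizes []uint64) error {
	var participants uint64
	for _, size := range committeeSizes {
		participants += size
	}
	if aggregationBitsLen != participants {
		return fmt.Errorf("aggregation bits count %d is different than participant count %d", aggregationBitsLen, participants)
	}
	return nil
}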
@@ -133,7 +164,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
att *ethpb.Attestation,
|
||||
att interfaces.Attestation,
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
@@ -143,15 +174,15 @@ func ProcessAttestationNoVerifySignature(
|
||||
}
|
||||
|
||||
currEpoch := time.CurrentEpoch(beaconState)
|
||||
data := att.Data
|
||||
s := att.Data.Slot
|
||||
data := att.GetData()
|
||||
s := att.GetData().Slot
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(ctx, beaconState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingAtt := ðpb.PendingAttestation{
|
||||
Data: data,
|
||||
AggregationBits: att.AggregationBits,
|
||||
AggregationBits: att.GetAggregationBits(),
|
||||
InclusionDelay: beaconState.Slot() - s,
|
||||
ProposerIndex: proposerIndex,
|
||||
}
|
||||
@@ -171,11 +202,11 @@ func ProcessAttestationNoVerifySignature(
|
||||
|
||||
// VerifyAttestationSignature converts an attestation into an indexed attestation and verifies
|
||||
// the signature in that attestation.
|
||||
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att *ethpb.Attestation) error {
|
||||
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att interfaces.Attestation) error {
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -203,7 +234,7 @@ func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyB
|
||||
// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
|
||||
// signing_root = compute_signing_root(indexed_attestation.data, domain)
|
||||
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
|
||||
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt *ethpb.IndexedAttestation) error {
|
||||
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt ethpb.IndexedAtt) error {
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
|
||||
defer span.End()
|
||||
|
||||
@@ -212,14 +243,14 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
}
|
||||
domain, err := signing.Domain(
|
||||
beaconState.Fork(),
|
||||
indexedAtt.Data.Target.Epoch,
|
||||
indexedAtt.GetData().Target.Epoch,
|
||||
params.BeaconConfig().DomainBeaconAttester,
|
||||
beaconState.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices := indexedAtt.AttestingIndices
|
||||
indices := indexedAtt.GetAttestingIndices()
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -44,7 +45,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices1, err := attestation.AttestingIndices(att1.AggregationBits, committee)
|
||||
attestingIndices1, err := attestation.AttestingIndices(att1, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices1))
|
||||
for i, indice := range attestingIndices1 {
|
||||
@@ -66,7 +67,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices2, err := attestation.AttestingIndices(att2.AggregationBits, committee)
|
||||
attestingIndices2, err := attestation.AttestingIndices(att2, committee)
|
||||
require.NoError(t, err)
|
||||
sigs = make([]bls.Signature, len(attestingIndices2))
|
||||
for i, indice := range attestingIndices2 {
|
||||
@@ -221,6 +222,83 @@ func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
|
||||
require.ErrorContains(t, "committee index 100 >= committee count 1", err)
|
||||
}
|
||||
|
||||
func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
var zeroSig [fieldparams.BLSSignatureLength]byte
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 100)
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
copy(ckp.Root, "hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{}))
|
||||
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(1, true)
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
}
|
||||
att.Signature = zeroSig[:]
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("non-zero committee index", func(t *testing.T) {
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.NewBitlist(1),
|
||||
CommitteeBits: bitfield.NewBitvector64(),
|
||||
}
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
|
||||
})
|
||||
t.Run("index of committee too big", func(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(63, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
}
|
||||
att.Signature = zeroSig[:]
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "committee index 63 >= committee count 1", err)
|
||||
})
|
||||
t.Run("wrong aggregation bits count", func(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(123)
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
}
|
||||
att.Signature = zeroSig[:]
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "aggregation bits count 123 is different than participant count 3", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
@@ -386,7 +464,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []*ethpb.Attestation
|
||||
var atts []interfaces.Attestation
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
@@ -402,7 +480,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
|
||||
assert.ErrorContains(t, want, err)
|
||||
|
||||
atts = []*ethpb.Attestation{}
|
||||
atts = []interfaces.Attestation{}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
@@ -543,7 +621,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []interfaces.Attestation{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
@@ -607,6 +685,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
_, err = blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
|
||||
_, err = blocks.AttestationSignatureBatch(ctx, st, []interfaces.Attestation{att1, att2})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -7,13 +7,12 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -40,7 +39,7 @@ import (
|
||||
func ProcessAttesterSlashings(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashings []*ethpb.AttesterSlashing,
|
||||
slashings []interfaces.AttesterSlashing,
|
||||
slashFunc slashValidatorFunc,
|
||||
) (state.BeaconState, error) {
|
||||
var err error
|
||||
@@ -57,7 +56,7 @@ func ProcessAttesterSlashings(
|
||||
func ProcessAttesterSlashing(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashing *ethpb.AttesterSlashing,
|
||||
slashing interfaces.AttesterSlashing,
|
||||
slashFunc slashValidatorFunc,
|
||||
) (state.BeaconState, error) {
|
||||
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
|
||||
@@ -77,19 +76,7 @@ func ProcessAttesterSlashing(
|
||||
return nil, err
|
||||
}
|
||||
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
|
||||
cfg := params.BeaconConfig()
|
||||
var slashingQuotient uint64
|
||||
switch {
|
||||
case beaconState.Version() == version.Phase0:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotient
|
||||
case beaconState.Version() == version.Altair:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
case beaconState.Version() >= version.Bellatrix:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, errors.New("unknown state version")
|
||||
}
|
||||
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
|
||||
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash validator index %d",
|
||||
validatorIndex)
|
||||
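The switch removed above selected the minimum slashing penalty quotient by state version and passed it to the slash function; with the narrowed slashValidatorFunc signature later in this diff, that selection presumably happens inside the slashing routine itself. The sketch below only restates the deleted version-to-quotient mapping using the same config fields; it is not the new call path.

package example // illustrative only

import (
	"errors"

	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// minSlashingPenaltyQuotient reproduces the mapping that used to live at the call
// site: Phase0, Altair, and Bellatrix-and-later each use a different quotient.
func minSlashingPenaltyQuotient(stateVersion int) (uint64, error) {
	cfg := params.BeaconConfig()
	switch {
	case stateVersion == version.Phase0:
		return cfg.MinSlashingPenaltyQuotient, nil
	case stateVersion == version.Altair:
		return cfg.MinSlashingPenaltyQuotientAltair, nil
	case stateVersion >= version.Bellatrix:
		return cfg.MinSlashingPenaltyQuotientBellatrix, nil
	default:
		return 0, errors.New("unknown state version")
	}
}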
@@ -104,20 +91,20 @@ func ProcessAttesterSlashing(
|
||||
}
|
||||
|
||||
// VerifyAttesterSlashing validates the attestation data in both attestations in the slashing object.
|
||||
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing *ethpb.AttesterSlashing) error {
|
||||
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing interfaces.AttesterSlashing) error {
|
||||
if slashing == nil {
|
||||
return errors.New("nil slashing")
|
||||
}
|
||||
if slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
|
||||
if slashing.GetFirstAttestation() == nil || slashing.GetSecondAttestation() == nil {
|
||||
return errors.New("nil attestation")
|
||||
}
|
||||
if slashing.Attestation_1.Data == nil || slashing.Attestation_2.Data == nil {
|
||||
if slashing.GetFirstAttestation().GetData() == nil || slashing.GetSecondAttestation().GetData() == nil {
|
||||
return errors.New("nil attestation data")
|
||||
}
|
||||
att1 := slashing.Attestation_1
|
||||
att2 := slashing.Attestation_2
|
||||
data1 := att1.Data
|
||||
data2 := att2.Data
|
||||
att1 := slashing.GetFirstAttestation()
|
||||
att2 := slashing.GetSecondAttestation()
|
||||
data1 := att1.GetData()
|
||||
data2 := att2.GetData()
|
||||
if !IsSlashableAttestationData(data1, data2) {
|
||||
return errors.New("attestations are not slashable")
|
||||
}
|
||||
@@ -157,11 +144,11 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
|
||||
}
|
||||
|
||||
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
|
||||
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
|
||||
if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
|
||||
func SlashableAttesterIndices(slashing interfaces.AttesterSlashing) []uint64 {
|
||||
if slashing == nil || slashing.GetFirstAttestation() == nil || slashing.GetSecondAttestation() == nil {
|
||||
return nil
|
||||
}
|
||||
indices1 := slashing.Attestation_1.AttestingIndices
|
||||
indices2 := slashing.Attestation_2.AttestingIndices
|
||||
indices1 := slashing.GetFirstAttestation().GetAttestingIndices()
|
||||
indices2 := slashing.GetSecondAttestation().GetAttestingIndices()
|
||||
return slice.IntersectionUint64(indices1, indices2)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -57,7 +58,11 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
assert.ErrorContains(t, "attestations are not slashable", err)
|
||||
}
|
||||
|
||||
@@ -92,7 +97,11 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
|
||||
}
|
||||
|
||||
@@ -144,7 +153,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
@@ -213,7 +226,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
@@ -282,7 +299,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
@@ -351,7 +372,11 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -216,7 +217,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
fuzzer.Fuzz(a)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, []*ethpb.AttesterSlashing{a}, v.SlashValidator)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, []interfaces.AttesterSlashing{a}, v.SlashValidator)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -91,7 +92,11 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
if !newRegistry[expectedSlashedVal].Slashed {
|
||||
|
||||
@@ -135,8 +135,6 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature(t *testing.T) {
|
||||
undo := util.HackDenebMaxuint(t)
|
||||
defer undo()
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
|
||||
@@ -12,12 +12,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type slashValidatorFunc func(ctx context.Context, st state.BeaconState, vid primitives.ValidatorIndex, penaltyQuotient, proposerRewardQuotient uint64) (state.BeaconState, error)
|
||||
type slashValidatorFunc func(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
vid primitives.ValidatorIndex) (state.BeaconState, error)
|
||||
|
||||
// ProcessProposerSlashings is one of the operations performed
|
||||
// on each processed beacon block to slash proposers based on
|
||||
@@ -75,19 +77,7 @@ func ProcessProposerSlashing(
|
||||
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify proposer slashing")
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
var slashingQuotient uint64
|
||||
switch {
|
||||
case beaconState.Version() == version.Phase0:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotient
|
||||
case beaconState.Version() == version.Altair:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
case beaconState.Version() >= version.Bellatrix:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, errors.New("unknown state version")
|
||||
}
|
||||
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
|
||||
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
|
||||
}
|
||||
|
||||
@@ -179,7 +179,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
|
||||
func createAttestationSignatureBatch(
|
||||
ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
atts []*ethpb.Attestation,
|
||||
atts []interfaces.Attestation,
|
||||
domain []byte,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
if len(atts) == 0 {
|
||||
@@ -191,8 +191,8 @@ func createAttestationSignatureBatch(
|
||||
msgs := make([][32]byte, len(atts))
|
||||
descs := make([]string, len(atts))
|
||||
for i, a := range atts {
|
||||
sigs[i] = a.Signature
|
||||
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
sigs[i] = a.GetSignature()
|
||||
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -203,7 +203,7 @@ func createAttestationSignatureBatch(
|
||||
if err := attestation.IsValidAttestationIndices(ctx, ia); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices := ia.AttestingIndices
|
||||
indices := ia.GetAttestingIndices()
|
||||
pubkeys := make([][]byte, len(indices))
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
@@ -215,7 +215,7 @@ func createAttestationSignatureBatch(
|
||||
}
|
||||
pks[i] = aggP
|
||||
|
||||
root, err := signing.ComputeSigningRoot(ia.Data, domain)
|
||||
root, err := signing.ComputeSigningRoot(ia.GetData(), domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get signing root of object")
|
||||
}
|
||||
@@ -233,7 +233,7 @@ func createAttestationSignatureBatch(
|
||||
|
||||
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
|
||||
// signatures, and attestation signing data, and collates them into a signature batch object.
|
||||
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) {
|
||||
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []interfaces.Attestation) (*bls.SignatureBatch, error) {
|
||||
if len(atts) == 0 {
|
||||
return bls.NewSet(), nil
|
||||
}
|
||||
@@ -243,10 +243,10 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
|
||||
dt := params.BeaconConfig().DomainBeaconAttester
|
||||
|
||||
// Split attestations by fork. Note: the signature domain will differ based on the fork.
|
||||
var preForkAtts []*ethpb.Attestation
|
||||
var postForkAtts []*ethpb.Attestation
|
||||
var preForkAtts []interfaces.Attestation
|
||||
var postForkAtts []interfaces.Attestation
|
||||
for _, a := range atts {
|
||||
if slots.ToEpoch(a.Data.Slot) < fork.Epoch {
|
||||
if slots.ToEpoch(a.GetData().Slot) < fork.Epoch {
|
||||
preForkAtts = append(preForkAtts, a)
|
||||
} else {
|
||||
postForkAtts = append(postForkAtts, a)
|
||||
|
||||
@@ -145,7 +145,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
|
||||
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
expectedWithdrawals, err := st.ExpectedWithdrawals()
|
||||
expectedWithdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
|
||||
66
beacon-chain/core/electra/BUILD.bazel
Normal file
@@ -0,0 +1,66 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"balance_deposits.go",
|
||||
"churn.go",
|
||||
"consolidations.go",
|
||||
"transition.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"balance_deposits_test.go",
|
||||
"churn_test.go",
|
||||
"consolidations_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
77
beacon-chain/core/electra/balance_deposits.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_pending_balance_deposits(state: BeaconState) -> None:
|
||||
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||
// processed_amount = 0
|
||||
// next_deposit_index = 0
|
||||
//
|
||||
// for deposit in state.pending_balance_deposits:
|
||||
// if processed_amount + deposit.amount > available_for_processing:
|
||||
// break
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// processed_amount += deposit.amount
|
||||
// next_deposit_index += 1
|
||||
//
|
||||
// state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
||||
//
|
||||
// if len(state.pending_balance_deposits) == 0:
|
||||
// state.deposit_balance_to_consume = Gwei(0)
|
||||
// else:
|
||||
// state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance math.Gwei) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
depBalToConsume, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)
|
||||
nextDepositIndex := 0
|
||||
|
||||
deposits, err := st.PendingBalanceDeposits()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, deposit := range deposits {
|
||||
if math.Gwei(deposit.Amount) > availableForProcessing {
|
||||
break
|
||||
}
|
||||
if err := helpers.IncreaseBalance(st, deposit.Index, deposit.Amount); err != nil {
|
||||
return err
|
||||
}
|
||||
availableForProcessing -= math.Gwei(deposit.Amount)
|
||||
nextDepositIndex++
|
||||
}
|
||||
|
||||
deposits = deposits[nextDepositIndex:]
|
||||
if err := st.SetPendingBalanceDeposits(deposits); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(deposits) == 0 {
|
||||
return st.SetDepositBalanceToConsume(0)
|
||||
} else {
|
||||
return st.SetDepositBalanceToConsume(availableForProcessing)
|
||||
}
|
||||
}
|
||||
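A caller in epoch processing is expected to already hold the precomputed total active balance; when it does not, the balance can be derived from the state as in the sketch below, which mirrors the test setup that follows. The wrapper name is illustrative and this is not the actual epoch-processing call site.

package example // illustrative only

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/math"
)

// applyPendingBalanceDeposits derives the total active balance from the state and
// feeds it to ProcessPendingBalanceDeposits, which mutates the state in place.
func applyPendingBalanceDeposits(ctx context.Context, st state.BeaconState) error {
	activeBalance, err := helpers.TotalActiveBalance(st)
	if err != nil {
		return err
	}
	return electra.ProcessPendingBalanceDeposits(ctx, st, math.Gwei(activeBalance))
}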
129
beacon-chain/core/electra/balance_deposits_test.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
wantErr bool
|
||||
check func(*testing.T, state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "nil state fails",
|
||||
state: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no deposits resets balance to consume",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), res)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more deposits than balance to consume processes partial deposits",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 20)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 10,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(100), res)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 10; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
|
||||
}
|
||||
|
||||
// Half of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "less deposits than balance to consume processes all deposits",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 5)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 5,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), res)
|
||||
// Validators 0..4 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 5; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
}
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var tab uint64
|
||||
var err error
|
||||
if tt.state != nil {
|
||||
// The caller of this method would normally have the precomputed balance values for total
|
||||
// active balance for this epoch. For ease of test setup, we will compute total active
|
||||
// balance from the given state.
|
||||
tab, err = helpers.TotalActiveBalance(tt.state)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, math.Gwei(tab))
|
||||
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%s", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
85
beacon-chain/core/electra/churn.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// ComputeConsolidationEpochAndUpdateChurn fulfills the consensus spec definition below. This method
|
||||
// calls mutating methods on the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidation_balance: Gwei) -> Epoch:
|
||||
// earliest_consolidation_epoch = max(
|
||||
// state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)))
|
||||
// per_epoch_consolidation_churn = get_consolidation_churn_limit(state)
|
||||
// # New epoch for consolidations.
|
||||
// if state.earliest_consolidation_epoch < earliest_consolidation_epoch:
|
||||
// consolidation_balance_to_consume = per_epoch_consolidation_churn
|
||||
// else:
|
||||
// consolidation_balance_to_consume = state.consolidation_balance_to_consume
|
||||
//
|
||||
// # Consolidation doesn't fit in the current earliest epoch.
|
||||
// if consolidation_balance > consolidation_balance_to_consume:
|
||||
// balance_to_process = consolidation_balance - consolidation_balance_to_consume
|
||||
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
|
||||
// earliest_consolidation_epoch += additional_epochs
|
||||
// consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn
|
||||
//
|
||||
// # Consume the balance and update state variables.
|
||||
// state.consolidation_balance_to_consume = consolidation_balance_to_consume - consolidation_balance
|
||||
// state.earliest_consolidation_epoch = earliest_consolidation_epoch
|
||||
//
|
||||
// return state.earliest_consolidation_epoch
|
||||
func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.BeaconState, consolidationBalance math.Gwei) (primitives.Epoch, error) {
|
||||
earliestEpoch, err := s.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(s.Slot())))
|
||||
activeBal, err := helpers.TotalActiveBalance(s)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(math.Gwei(activeBal))
|
||||
|
||||
// New epoch for consolidations.
|
||||
var consolidationBalanceToConsume math.Gwei
|
||||
if earliestEpoch < earliestConsolidationEpoch {
|
||||
consolidationBalanceToConsume = perEpochConsolidationChurn
|
||||
} else {
|
||||
consolidationBalanceToConsume, err = s.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Consolidation doesn't fit in the current earliest epoch.
|
||||
if consolidationBalance > consolidationBalanceToConsume {
|
||||
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
|
||||
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
|
||||
additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
additionalEpochs++
|
||||
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
|
||||
consolidationBalanceToConsume += math.Gwei(additionalEpochs) * perEpochConsolidationChurn
|
||||
}
|
||||
|
||||
// Consume the balance and update state variables.
|
||||
if err := s.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := s.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return earliestConsolidationEpoch, nil
|
||||
}
|
||||
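The churn figures used in the tests that follow (232 ETH of consolidation churn per epoch at 32M ETH of total active balance) fall out of the spec churn limits. The sketch below restates that arithmetic with assumed mainnet preset values (CHURN_LIMIT_QUOTIENT = 65536, MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA = 128 ETH, MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT = 256 ETH, EFFECTIVE_BALANCE_INCREMENT = 1 ETH); it does not call the helpers package.

package example // illustrative only; assumes mainnet preset values

const (
	gweiPerETH                 uint64 = 1_000_000_000
	churnLimitQuotient         uint64 = 65536            // CHURN_LIMIT_QUOTIENT (assumed)
	minPerEpochChurnGwei       uint64 = 128 * gweiPerETH // MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA (assumed)
	maxActivationExitChurnGwei uint64 = 256 * gweiPerETH // MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT (assumed)
	effectiveBalanceIncrement  uint64 = 1 * gweiPerETH   // EFFECTIVE_BALANCE_INCREMENT (assumed)
)

// consolidationChurnGwei reproduces the arithmetic behind the 232 ETH figure:
// balance churn = max(min churn, total // quotient) floored to an effective-balance
// increment; consolidation churn = balance churn minus activation/exit churn.
func consolidationChurnGwei(totalActiveBalanceGwei uint64) uint64 {
	balanceChurn := max(minPerEpochChurnGwei, totalActiveBalanceGwei/churnLimitQuotient)
	balanceChurn -= balanceChurn % effectiveBalanceIncrement
	activationExitChurn := min(maxActivationExitChurnGwei, balanceChurn)
	return balanceChurn - activationExitChurn // 32M ETH total -> 488 ETH - 256 ETH = 232 ETH
}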
142
beacon-chain/core/electra/churn_test.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func createValidatorsWithTotalActiveBalance(totalBal math.Gwei) []*eth.Validator {
|
||||
num := totalBal / math.Gwei(params.BeaconConfig().MinActivationBalance)
|
||||
vals := make([]*eth.Validator, num)
|
||||
for i := range vals {
|
||||
vals[i] = ð.Validator{
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
}
|
||||
}
|
||||
if totalBal%math.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
|
||||
vals = append(vals, ð.Validator{
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
|
||||
})
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
|
||||
// Test setup: create a state with 32M ETH total active balance.
|
||||
// In this state, the churn is expected to be 232 ETH per epoch.
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
consolidationBalance math.Gwei
|
||||
expectedEpoch primitives.Epoch
|
||||
expectedConsolidationBalanceToConsume math.Gwei
|
||||
}{
|
||||
{
|
||||
name: "compute consolidation with no consolidation balance",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 9,
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 0, // 0 ETH
|
||||
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
|
||||
expectedConsolidationBalanceToConsume: 232000000000, // 232 ETH
|
||||
},
|
||||
{
|
||||
name: "new epoch for consolidations",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 9,
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 32000000000, // 32 ETH
|
||||
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
|
||||
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
},
|
||||
{
|
||||
name: "flows into another epoch",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 9,
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 235000000000, // 235 ETH
|
||||
expectedEpoch: 16, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 229000000000, // 229 ETH
|
||||
},
|
||||
{
|
||||
name: "not a new epoch, fits in remaining balance of current epoch",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 15,
|
||||
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 32000000000, // 32 ETH
|
||||
expectedEpoch: 15, // Fits into current earliest consolidation epoch.
|
||||
expectedConsolidationBalanceToConsume: 168000000000, // 168 ETH
|
||||
},
|
||||
{
|
||||
name: "not a new epoch, fits in remaining balance of current epoch",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 15,
|
||||
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: 232000000000, // 232 ETH
|
||||
expectedEpoch: 16, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotEpoch, err := electra.ComputeConsolidationEpochAndUpdateChurn(context.TODO(), tt.state, tt.consolidationBalance)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedEpoch, gotEpoch)
|
||||
// Check consolidation balance to consume is set on the state.
|
||||
cbtc, err := tt.state.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedConsolidationBalanceToConsume, cbtc)
|
||||
// Check earliest consolidation epoch was set on the state.
|
||||
gotEpoch, err = tt.state.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedEpoch, gotEpoch)
|
||||
})
|
||||
}
|
||||
}
|
||||
258
beacon-chain/core/electra/consolidations.go
Normal file
@@ -0,0 +1,258 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
|
||||
// calls to the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_pending_consolidations(state: BeaconState) -> None:
|
||||
// next_pending_consolidation = 0
|
||||
// for pending_consolidation in state.pending_consolidations:
|
||||
// source_validator = state.validators[pending_consolidation.source_index]
|
||||
// if source_validator.slashed:
|
||||
// next_pending_consolidation += 1
|
||||
// continue
|
||||
// if source_validator.withdrawable_epoch > get_current_epoch(state):
|
||||
// break
|
||||
//
|
||||
// # Churn any target excess active balance of target and raise its max
|
||||
// switch_to_compounding_validator(state, pending_consolidation.target_index)
|
||||
// # Move active balance to target. Excess balance is withdrawable.
|
||||
// active_balance = get_active_balance(state, pending_consolidation.source_index)
|
||||
// decrease_balance(state, pending_consolidation.source_index, active_balance)
|
||||
// increase_balance(state, pending_consolidation.target_index, active_balance)
|
||||
// next_pending_consolidation += 1
|
||||
//
|
||||
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
|
||||
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
var nextPendingConsolidation uint64
|
||||
pendingConsolidations, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pc := range pendingConsolidations {
|
||||
sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sourceValidator.Slashed {
|
||||
nextPendingConsolidation++
|
||||
continue
|
||||
}
|
||||
if sourceValidator.WithdrawableEpoch > currentEpoch {
|
||||
break
|
||||
}
|
||||
|
||||
if err := SwitchToCompoundingValidator(ctx, st, pc.TargetIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
activeBalance, err := st.ActiveBalanceAtIndex(pc.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.DecreaseBalance(st, pc.SourceIndex, activeBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.IncreaseBalance(st, pc.TargetIndex, activeBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
nextPendingConsolidation++
|
||||
}
|
||||
|
||||
if nextPendingConsolidation > 0 {
|
||||
return st.SetPendingConsolidations(pendingConsolidations[nextPendingConsolidation:])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
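For context, a minimal sketch of how an epoch-transition routine might call into this function; the wrapper name is hypothetical and not part of this change:

// processEpochElectraSketch is illustrative only.
func processEpochElectraSketch(ctx context.Context, st state.BeaconState) error {
	// Pending consolidations are settled as one step of Electra epoch processing.
	if err := ProcessPendingConsolidations(ctx, st); err != nil {
		return errors.Wrap(err, "could not process pending consolidations")
	}
	// ...remaining epoch-processing steps would follow here...
	return nil
}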
|
||||
|
||||
// ProcessConsolidations implements the spec definition below. This method makes mutating calls to
|
||||
// the beacon state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_consolidation(state: BeaconState, signed_consolidation: SignedConsolidation) -> None:
|
||||
// # If the pending consolidations queue is full, no consolidations are allowed in the block
|
||||
// assert len(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT
|
||||
// # If there is too little available consolidation churn limit, no consolidations are allowed in the block
|
||||
// assert get_consolidation_churn_limit(state) > MIN_ACTIVATION_BALANCE
|
||||
// consolidation = signed_consolidation.message
|
||||
// # Verify that source != target, so a consolidation cannot be used as an exit.
|
||||
// assert consolidation.source_index != consolidation.target_index
|
||||
//
|
||||
// source_validator = state.validators[consolidation.source_index]
|
||||
// target_validator = state.validators[consolidation.target_index]
|
||||
// # Verify the source and the target are active
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// assert is_active_validator(source_validator, current_epoch)
|
||||
// assert is_active_validator(target_validator, current_epoch)
|
||||
// # Verify exits for source and target have not been initiated
|
||||
// assert source_validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
// assert target_validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
// # Consolidations must specify an epoch when they become valid; they are not valid before then
|
||||
// assert current_epoch >= consolidation.epoch
|
||||
//
|
||||
// # Verify the source and the target have Execution layer withdrawal credentials
|
||||
// assert has_execution_withdrawal_credential(source_validator)
|
||||
// assert has_execution_withdrawal_credential(target_validator)
|
||||
// # Verify the same withdrawal address
|
||||
// assert source_validator.withdrawal_credentials[12:] == target_validator.withdrawal_credentials[12:]
|
||||
//
|
||||
// # Verify consolidation is signed by the source and the target
|
||||
// domain = compute_domain(DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
|
||||
// signing_root = compute_signing_root(consolidation, domain)
|
||||
// pubkeys = [source_validator.pubkey, target_validator.pubkey]
|
||||
// assert bls.FastAggregateVerify(pubkeys, signing_root, signed_consolidation.signature)
|
||||
//
|
||||
// # Initiate source validator exit and append pending consolidation
|
||||
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
||||
// state, source_validator.effective_balance)
|
||||
// source_validator.withdrawable_epoch = Epoch(
|
||||
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
// )
|
||||
// state.pending_consolidations.append(PendingConsolidation(
|
||||
// source_index=consolidation.source_index,
|
||||
// target_index=consolidation.target_index
|
||||
// ))
|
||||
func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethpb.SignedConsolidation) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessConsolidations")
|
||||
defer span.End()
|
||||
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
if len(cs) == 0 {
|
||||
return nil // Nothing to process.
|
||||
}
|
||||
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainConsolidation,
|
||||
nil, // Use genesis fork version
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalBalance, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helpers.ConsolidationChurnLimit(math.Gwei(totalBalance)) <= math.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
return errors.New("too little available consolidation churn limit")
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
for _, c := range cs {
|
||||
if c == nil || c.Message == nil {
|
||||
return errors.New("nil consolidation")
|
||||
}
|
||||
|
||||
if n, err := st.NumPendingConsolidations(); err != nil {
|
||||
return err
|
||||
} else if n >= params.BeaconConfig().PendingConsolidationsLimit {
|
||||
return errors.New("pending consolidations queue is full")
|
||||
}
|
||||
|
||||
if c.Message.SourceIndex == c.Message.TargetIndex {
|
||||
return errors.New("source and target index are the same")
|
||||
}
|
||||
source, err := st.ValidatorAtIndex(c.Message.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target, err := st.ValidatorAtIndex(c.Message.TargetIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !helpers.IsActiveValidator(source, currentEpoch) {
|
||||
return errors.New("source is not active")
|
||||
}
|
||||
if !helpers.IsActiveValidator(target, currentEpoch) {
|
||||
return errors.New("target is not active")
|
||||
}
|
||||
if source.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return errors.New("source exit epoch has been initiated")
|
||||
}
|
||||
if target.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return errors.New("target exit epoch has been initiated")
|
||||
}
|
||||
if currentEpoch < c.Message.Epoch {
|
||||
return errors.New("consolidation is not valid yet")
|
||||
}
|
||||
|
||||
if !helpers.HasExecutionWithdrawalCredentials(source) {
|
||||
return errors.New("source does not have execution withdrawal credentials")
|
||||
}
|
||||
if !helpers.HasExecutionWithdrawalCredentials(target) {
|
||||
return errors.New("target does not have execution withdrawal credentials")
|
||||
}
|
||||
if !helpers.IsSameWithdrawalCredentials(source, target) {
|
||||
return errors.New("source and target have different withdrawal credentials")
|
||||
}
|
||||
|
||||
sr, err := signing.ComputeSigningRoot(c.Message, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sourcePk, err := bls.PublicKeyFromBytes(source.PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert source public key bytes to bls public key")
|
||||
}
|
||||
targetPk, err := bls.PublicKeyFromBytes(target.PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert target public key bytes to bls public key")
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(c.Signature)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
if !sig.FastAggregateVerify([]bls.PublicKey{sourcePk, targetPk}, sr) {
|
||||
return errors.New("consolidation signature verification failed")
|
||||
}
|
||||
|
||||
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, math.Gwei(source.EffectiveBalance))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
source.ExitEpoch = sEE
|
||||
source.WithdrawableEpoch = sEE + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
if err := st.UpdateValidatorAtIndex(c.Message.SourceIndex, source); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := st.AppendPendingConsolidation(c.Message.ToPendingConsolidation()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
441
beacon-chain/core/electra/consolidations_test.go
Normal file
@@ -0,0 +1,441 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/interop"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingConsolidations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
check func(*testing.T, state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
state: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no pending consolidations",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "processes pending consolidation successfully",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
PendingConsolidations: []*eth.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 0,
|
||||
TargetIndex: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
// Balances are transferred from v0 to v1.
|
||||
bal0, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), bal0)
|
||||
bal1, err := st.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2*params.BeaconConfig().MinActivationBalance, bal1)
|
||||
|
||||
// The pending consolidation is removed from the list.
|
||||
num, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), num)
|
||||
|
||||
// v1 is switched to compounding validator.
|
||||
v1, err := st.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "stop processing when a source val withdrawable epoch is in the future",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
WithdrawableEpoch: 100,
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
PendingConsolidations: []*eth.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 0,
|
||||
TargetIndex: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
// No balances are transferred from v0 to v1.
|
||||
bal0, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
|
||||
bal1, err := st.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
|
||||
|
||||
// The pending consolidation is still in the list.
|
||||
num, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), num)
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "slashed validator is not consolidated",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
},
|
||||
{
|
||||
Slashed: true,
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xCC},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
PendingConsolidations: []*eth.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 2,
|
||||
TargetIndex: 3,
|
||||
},
|
||||
{
|
||||
SourceIndex: 0,
|
||||
TargetIndex: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
// No balances are transferred from v2 to v3.
|
||||
bal0, err := st.BalanceAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
|
||||
bal1, err := st.BalanceAtIndex(3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
|
||||
|
||||
// No pending consolidation remaining.
|
||||
num, err := st.NumPendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), num)
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessPendingConsolidations(context.TODO(), tt.state)
|
||||
require.Equal(t, tt.wantErr, err != nil)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
gwei := balETH * 1_000_000_000
|
||||
balPerVal := params.BeaconConfig().MinActivationBalance
|
||||
numVals := gwei / balPerVal
|
||||
|
||||
vals := make([]*eth.Validator, numVals)
|
||||
bals := make([]uint64, numVals)
|
||||
for i := uint64(0); i < numVals; i++ {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
vals[i] = ð.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: balPerVal,
|
||||
WithdrawalCredentials: wc,
|
||||
}
|
||||
bals[i] = balPerVal
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Fork: ð.Fork{
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return st
|
||||
}
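A note on the 19,000,000 ETH figure these tests pass to stateWithActiveBalanceETH (my arithmetic, assuming mainnet Electra constants): it is roughly the smallest round total for which the consolidation churn limit clears the MinActivationBalance check in ProcessConsolidations.

// 19M ETH / CHURN_LIMIT_QUOTIENT (65,536) ≈ 289 ETH of balance churn
// 289 ETH - 256 ETH activation/exit churn = 33 ETH of consolidation churn > 32 ETH MinActivationBalance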
|
||||
|
||||
func TestProcessConsolidations(t *testing.T) {
|
||||
secretKeys, publicKeys, err := interop.DeterministicallyGenerateKeys(0, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisValidatorRoot := bytesutil.PadTo([]byte("genesisValidatorRoot"), fieldparams.RootLength)
|
||||
|
||||
_ = secretKeys
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
scs []*eth.SignedConsolidation
|
||||
check func(*testing.T, state.BeaconState)
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
scs: make([]*eth.SignedConsolidation, 10),
|
||||
wantErr: "nil state",
|
||||
},
|
||||
{
|
||||
name: "nil consolidation in slice",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{nil, nil},
|
||||
wantErr: "nil consolidation",
|
||||
},
|
||||
{
|
||||
name: "state is 100% full of pending consolidations",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
pc := make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit)
|
||||
require.NoError(t, st.SetPendingConsolidations(pc))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{}}},
|
||||
wantErr: "pending consolidations queue is full",
|
||||
},
|
||||
{
|
||||
name: "state has too little consolidation churn limit available to process a consolidation",
|
||||
state: func() state.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{}}},
|
||||
wantErr: "too little available consolidation churn limit",
|
||||
},
|
||||
{
|
||||
name: "consolidation with source and target as the same index is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 100}}},
|
||||
wantErr: "source and target index are the same",
|
||||
},
|
||||
{
|
||||
name: "consolidation with inactive source is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source is not active",
|
||||
},
|
||||
{
|
||||
name: "consolidation with inactive target is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target is not active",
|
||||
},
|
||||
{
|
||||
name: "consolidation with exiting source is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ExitEpoch = 256
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source exit epoch has been initiated",
|
||||
},
|
||||
{
|
||||
name: "consolidation with exiting target is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.ExitEpoch = 256
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target exit epoch has been initiated",
|
||||
},
|
||||
{
|
||||
name: "consolidation with future epoch is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 55}}},
|
||||
wantErr: "consolidation is not valid yet",
|
||||
},
|
||||
{
|
||||
name: "source validator without withdrawal credentials is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.WithdrawalCredentials = []byte{}
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
|
||||
wantErr: "source does not have execution withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "target validator without withdrawal credentials is rejected",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
val, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
val.WithdrawalCredentials = []byte{}
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
|
||||
return st
|
||||
}(),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "target does not have execution withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "source and target with different withdrawal credentials is rejected",
|
||||
state: stateWithActiveBalanceETH(t, 19_000_000),
|
||||
scs: []*eth.SignedConsolidation{{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
|
||||
wantErr: "source and target have different withdrawal credentials",
|
||||
},
|
||||
{
|
||||
name: "consolidation with valid signatures is OK",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 19_000_000)
|
||||
require.NoError(t, st.SetGenesisValidatorsRoot(genesisValidatorRoot))
|
||||
source, err := st.ValidatorAtIndex(100)
|
||||
require.NoError(t, err)
|
||||
target, err := st.ValidatorAtIndex(25)
|
||||
require.NoError(t, err)
|
||||
source.PublicKey = publicKeys[0].Marshal()
|
||||
source.WithdrawalCredentials = target.WithdrawalCredentials
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(100, source))
|
||||
target.PublicKey = publicKeys[1].Marshal()
|
||||
require.NoError(t, st.UpdateValidatorAtIndex(25, target))
|
||||
return st
|
||||
}(),
|
||||
scs: func() []*eth.SignedConsolidation {
|
||||
sc := ð.SignedConsolidation{Message: ð.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 8}}
|
||||
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainConsolidation,
|
||||
nil,
|
||||
genesisValidatorRoot,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(sc.Message, domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
sig0 := secretKeys[0].Sign(sr[:])
|
||||
sig1 := secretKeys[1].Sign(sr[:])
|
||||
|
||||
sc.Signature = blst.AggregateSignatures([]common.Signature{sig0, sig1}).Marshal()
|
||||
|
||||
return []*eth.SignedConsolidation{sc}
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
source, err := st.ValidatorAtIndex(100)
|
||||
require.NoError(t, err)
|
||||
// The consolidated validator is exiting.
|
||||
require.Equal(t, primitives.Epoch(15), source.ExitEpoch) // 15 = state.Epoch(10) + MAX_SEED_LOOKAHEAD(4) + 1
|
||||
require.Equal(t, primitives.Epoch(15+params.BeaconConfig().MinValidatorWithdrawabilityDelay), source.WithdrawableEpoch)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessConsolidations(context.TODO(), tt.state, tt.scs)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
23
beacon-chain/core/electra/transition.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
)
|
||||
|
||||
// Re-exports for methods that haven't changed in Electra.
|
||||
var (
|
||||
InitializePrecomputeValidators = altair.InitializePrecomputeValidators
|
||||
ProcessEpochParticipation = altair.ProcessEpochParticipation
|
||||
ProcessInactivityScores = altair.ProcessInactivityScores
|
||||
ProcessRewardsAndPenaltiesPrecompute = altair.ProcessRewardsAndPenaltiesPrecompute
|
||||
ProcessSlashings = e.ProcessSlashings
|
||||
ProcessEth1DataReset = e.ProcessEth1DataReset
|
||||
ProcessSlashingsReset = e.ProcessSlashingsReset
|
||||
ProcessRandaoMixesReset = e.ProcessRandaoMixesReset
|
||||
ProcessHistoricalDataUpdate = e.ProcessHistoricalDataUpdate
|
||||
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
|
||||
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
|
||||
AttestationsDelta = altair.AttestationsDelta
|
||||
ProcessSyncAggregate = altair.ProcessSyncAggregate
|
||||
)
|
||||
312
beacon-chain/core/electra/upgrade.go
Normal file
@@ -0,0 +1,312 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// UpgradeToElectra upgrades a Deneb beacon state to an Electra beacon state.
|
||||
// def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
|
||||
//
|
||||
// epoch = deneb.get_current_epoch(pre)
|
||||
// latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
// parent_hash=pre.latest_execution_payload_header.parent_hash,
|
||||
// fee_recipient=pre.latest_execution_payload_header.fee_recipient,
|
||||
// state_root=pre.latest_execution_payload_header.state_root,
|
||||
// receipts_root=pre.latest_execution_payload_header.receipts_root,
|
||||
// logs_bloom=pre.latest_execution_payload_header.logs_bloom,
|
||||
// prev_randao=pre.latest_execution_payload_header.prev_randao,
|
||||
// block_number=pre.latest_execution_payload_header.block_number,
|
||||
// gas_limit=pre.latest_execution_payload_header.gas_limit,
|
||||
// gas_used=pre.latest_execution_payload_header.gas_used,
|
||||
// timestamp=pre.latest_execution_payload_header.timestamp,
|
||||
// extra_data=pre.latest_execution_payload_header.extra_data,
|
||||
// base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
|
||||
// block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
// transactions_root=pre.latest_execution_payload_header.transactions_root,
|
||||
// withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||
// blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
|
||||
// excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
|
||||
// deposit_receipts_root=Root(), # [New in Electra:EIP6110]
|
||||
// withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
|
||||
// )
|
||||
//
|
||||
// exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
|
||||
// if not exit_epochs:
|
||||
// exit_epochs = [get_current_epoch(pre)]
|
||||
// earliest_exit_epoch = max(exit_epochs) + 1
|
||||
//
|
||||
// post = BeaconState(
|
||||
// # Versioning
|
||||
// genesis_time=pre.genesis_time,
|
||||
// genesis_validators_root=pre.genesis_validators_root,
|
||||
// slot=pre.slot,
|
||||
// fork=Fork(
|
||||
// previous_version=pre.fork.current_version,
|
||||
// current_version=ELECTRA_FORK_VERSION, # [Modified in Electra:EIP6110]
|
||||
// epoch=epoch,
|
||||
// ),
|
||||
// # History
|
||||
// latest_block_header=pre.latest_block_header,
|
||||
// block_roots=pre.block_roots,
|
||||
// state_roots=pre.state_roots,
|
||||
// historical_roots=pre.historical_roots,
|
||||
// # Eth1
|
||||
// eth1_data=pre.eth1_data,
|
||||
// eth1_data_votes=pre.eth1_data_votes,
|
||||
// eth1_deposit_index=pre.eth1_deposit_index,
|
||||
// # Registry
|
||||
// validators=pre.validators,
|
||||
// balances=pre.balances,
|
||||
// # Randomness
|
||||
// randao_mixes=pre.randao_mixes,
|
||||
// # Slashings
|
||||
// slashings=pre.slashings,
|
||||
// # Participation
|
||||
// previous_epoch_participation=pre.previous_epoch_participation,
|
||||
// current_epoch_participation=pre.current_epoch_participation,
|
||||
// # Finality
|
||||
// justification_bits=pre.justification_bits,
|
||||
// previous_justified_checkpoint=pre.previous_justified_checkpoint,
|
||||
// current_justified_checkpoint=pre.current_justified_checkpoint,
|
||||
// finalized_checkpoint=pre.finalized_checkpoint,
|
||||
// # Inactivity
|
||||
// inactivity_scores=pre.inactivity_scores,
|
||||
// # Sync
|
||||
// current_sync_committee=pre.current_sync_committee,
|
||||
// next_sync_committee=pre.next_sync_committee,
|
||||
// # Execution-layer
|
||||
// latest_execution_payload_header=latest_execution_payload_header, # [Modified in Electra:EIP6110:EIP7002]
|
||||
// # Withdrawals
|
||||
// next_withdrawal_index=pre.next_withdrawal_index,
|
||||
// next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
|
||||
// # Deep history valid from Capella onwards
|
||||
// historical_summaries=pre.historical_summaries,
|
||||
// # [New in Electra:EIP6110]
|
||||
// deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,
|
||||
// # [New in Electra:EIP7251]
|
||||
// deposit_balance_to_consume=0,
|
||||
// exit_balance_to_consume=0,
|
||||
// earliest_exit_epoch=earliest_exit_epoch,
|
||||
// consolidation_balance_to_consume=0,
|
||||
// earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)),
|
||||
// pending_balance_deposits=[],
|
||||
// pending_partial_withdrawals=[],
|
||||
// pending_consolidations=[],
|
||||
// )
|
||||
//
|
||||
// post.exit_balance_to_consume = get_activation_exit_churn_limit(post)
|
||||
// post.consolidation_balance_to_consume = get_consolidation_churn_limit(post)
|
||||
//
|
||||
// # [New in Electra:EIP7251]
|
||||
// # add validators that are not yet active to pending balance deposits
|
||||
// pre_activation = sorted([
|
||||
// index for index, validator in enumerate(post.validators)
|
||||
// if validator.activation_epoch == FAR_FUTURE_EPOCH
|
||||
// ], key=lambda index: (
|
||||
// post.validators[index].activation_eligibility_epoch,
|
||||
// index
|
||||
// ))
|
||||
//
|
||||
// for index in pre_activation:
|
||||
// queue_entire_balance_and_reset_validator(post, ValidatorIndex(index))
|
||||
//
|
||||
// # Ensure early adopters of compounding credentials go through the activation churn
|
||||
// for index, validator in enumerate(post.validators):
|
||||
// if has_compounding_withdrawal_credential(validator):
|
||||
// queue_excess_active_balance(post, ValidatorIndex(index))
|
||||
//
|
||||
// return post
|
||||
func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSyncCommittee, err := beaconState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inactivityScores, err := beaconState.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txRoot, err := payloadHeader.TransactionsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wdRoot, err := payloadHeader.WithdrawalsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wi, err := beaconState.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vi, err := beaconState.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
summaries, err := beaconState.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
historicalRoots, err := beaconState.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
excessBlobGas, err := payloadHeader.ExcessBlobGas()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobGasUsed, err := payloadHeader.BlobGasUsed()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// [New in Electra:EIP7251]
|
||||
earliestExitEpoch := time.CurrentEpoch(beaconState)
|
||||
preActivationIndices := make([]primitives.ValidatorIndex, 0)
|
||||
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
|
||||
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
|
||||
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
|
||||
earliestExitEpoch = val.ExitEpoch()
|
||||
}
|
||||
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
|
||||
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
earliestExitEpoch++ // Increment to find the earliest possible exit epoch
|
||||
|
||||
// Note: the total active balance is the same in the pre-state and the post state.
|
||||
// We deviate slightly from the spec here, which computes these churn limits on the post state.
|
||||
tab, err := helpers.TotalActiveBalance(beaconState)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get total active balance")
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateElectra{
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: beaconState.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
Epoch: time.CurrentEpoch(beaconState),
|
||||
},
|
||||
LatestBlockHeader: beaconState.LatestBlockHeader(),
|
||||
BlockRoots: beaconState.BlockRoots(),
|
||||
StateRoots: beaconState.StateRoots(),
|
||||
HistoricalRoots: historicalRoots,
|
||||
Eth1Data: beaconState.Eth1Data(),
|
||||
Eth1DataVotes: beaconState.Eth1DataVotes(),
|
||||
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
|
||||
Validators: beaconState.Validators(),
|
||||
Balances: beaconState.Balances(),
|
||||
RandaoMixes: beaconState.RandaoMixes(),
|
||||
Slashings: beaconState.Slashings(),
|
||||
PreviousEpochParticipation: prevEpochParticipation,
|
||||
CurrentEpochParticipation: currentEpochParticipation,
|
||||
JustificationBits: beaconState.JustificationBits(),
|
||||
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
|
||||
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
|
||||
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
DepositReceiptsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
HistoricalSummaries: summaries,
|
||||
|
||||
DepositReceiptsStartIndex: params.BeaconConfig().UnsetDepositReceiptsStartIndex,
|
||||
DepositBalanceToConsume: 0,
|
||||
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(math.Gwei(tab)),
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(math.Gwei(tab)),
|
||||
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
|
||||
PendingBalanceDeposits: make([]*ethpb.PendingBalanceDeposit, 0),
|
||||
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
|
||||
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
|
||||
}
|
||||
|
||||
// Sort preActivationIndices based on custom criteria.
|
||||
sort.Slice(preActivationIndices, func(i, j int) bool {
|
||||
// Comparing based on ActivationEligibilityEpoch and then by index if the epochs are the same
|
||||
if s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch == s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch {
|
||||
return preActivationIndices[i] < preActivationIndices[j]
|
||||
}
|
||||
return s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch < s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch
|
||||
})
|
||||
|
||||
// need to cast the beaconState to use in helper functions
|
||||
post, err := state_native.InitializeFromProtoUnsafeElectra(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
|
||||
}
|
||||
|
||||
for _, index := range preActivationIndices {
|
||||
if err := helpers.QueueEntireBalanceAndResetValidator(post, index); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure early adopters of compounding credentials go through the activation churn
|
||||
for _, index := range compoundWithdrawalIndices {
|
||||
if err := helpers.QueueExcessActiveBalance(post, index); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to queue excess active balance")
|
||||
}
|
||||
}
|
||||
|
||||
return post, nil
|
||||
}
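A sketch of where this upgrade would typically be applied, at the Electra fork boundary. The wrapper is illustrative only and assumes an ElectraForkEpoch config value:

// maybeUpgradeToElectra is an illustrative fork-boundary hook, not part of this change.
func maybeUpgradeToElectra(st state.BeaconState) (state.BeaconState, error) {
	if slots.ToEpoch(st.Slot()) == params.BeaconConfig().ElectraForkEpoch {
		return UpgradeToElectra(st)
	}
	return st, nil
}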
|
||||
189
beacon-chain/core/electra/upgrade_test.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func TestUpgradeToElectra(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
|
||||
vals := st.Validators()
|
||||
vals[0].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
vals[1].WithdrawalCredentials = []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[1] = params.BeaconConfig().MinActivationBalance + 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
preForkState := st.Copy()
|
||||
mSt, err := electra.UpgradeToElectra(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), mSt.Slot())
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Validators()[2:], mSt.Validators()[2:])
|
||||
require.DeepSSZEqual(t, preForkState.Balances()[2:], mSt.Balances()[2:])
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
|
||||
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
|
||||
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
|
||||
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
|
||||
|
||||
require.Equal(t, len(preForkState.Validators()), len(mSt.Validators()))
|
||||
|
||||
preVal, err := preForkState.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal.EffectiveBalance)
|
||||
|
||||
preVal2, err := preForkState.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal2.EffectiveBalance)
|
||||
|
||||
mVal, err := mSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), mVal.EffectiveBalance)
|
||||
|
||||
mVal2, err := mSt.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, mVal2.EffectiveBalance)
|
||||
|
||||
numValidators := mSt.NumValidators()
|
||||
p, err := mSt.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
p, err = mSt.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
s, err := mSt.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
|
||||
|
||||
hr1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
hr2, err := mSt.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hr1, hr2)
|
||||
|
||||
f := mSt.Fork()
|
||||
require.DeepSSZEqual(t, ðpb.Fork{
|
||||
PreviousVersion: st.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
Epoch: time.CurrentEpoch(st),
|
||||
}, f)
|
||||
csc, err := mSt.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err := preForkState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, csc)
|
||||
nsc, err := mSt.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err = preForkState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, nsc)
|
||||
|
||||
header, err := mSt.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
|
||||
require.Equal(t, true, ok)
|
||||
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
txRoot, err := prevHeader.TransactionsRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
DepositReceiptsRoot: bytesutil.Bytes32(0),
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
|
||||
nwi, err := mSt.NextWithdrawalIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), nwi)
|
||||
|
||||
lwvi, err := mSt.NextWithdrawalValidatorIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), lwvi)
|
||||
|
||||
summaries, err := mSt.HistoricalSummaries()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(summaries))
|
||||
|
||||
startIndex, err := mSt.DepositReceiptsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().UnsetDepositReceiptsStartIndex, startIndex)
|
||||
|
||||
balance, err := mSt.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), balance)
|
||||
|
||||
tab, err := helpers.TotalActiveBalance(mSt)
|
||||
require.NoError(t, err)
|
||||
|
||||
ebtc, err := mSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitChurnLimit(math.Gwei(tab)), ebtc)
|
||||
|
||||
eee, err := mSt.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(1), eee)
|
||||
|
||||
cbtc, err := mSt.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ConsolidationChurnLimit(math.Gwei(tab)), cbtc)
|
||||
|
||||
earliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitEpoch(slots.ToEpoch(preForkState.Slot())), earliestConsolidationEpoch)
|
||||
|
||||
pendingBalanceDeposits, err := mSt.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pendingBalanceDeposits))
|
||||
require.Equal(t, uint64(1000), pendingBalanceDeposits[1].Amount)
|
||||
|
||||
numPendingPartialWithdrawals, err := mSt.NumPendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), numPendingPartialWithdrawals)
|
||||
|
||||
consolidations, err := mSt.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(consolidations))
|
||||
|
||||
}
|
||||
105
beacon-chain/core/electra/validator.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// SwitchToCompoundingValidator converts a validator with an eth1 withdrawal credential into a compounding validator and queues any excess active balance as a pending balance deposit.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// validator = state.validators[index]
|
||||
// if has_eth1_withdrawal_credential(validator):
|
||||
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
|
||||
// queue_excess_active_balance(state, index)
|
||||
func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(v.WithdrawalCredentials) == 0 {
|
||||
return errors.New("validator has no withdrawal credentials")
|
||||
}
|
||||
if helpers.HasETH1WithdrawalCredential(v) {
|
||||
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return queueExcessActiveBalance(ctx, s, idx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queueExcessActiveBalance queues any validator balance above MIN_ACTIVATION_BALANCE as a pending balance deposit and clamps the balance to MIN_ACTIVATION_BALANCE.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// if balance > MIN_ACTIVATION_BALANCE:
|
||||
// excess_balance = balance - MIN_ACTIVATION_BALANCE
|
||||
// state.balances[index] = MIN_ACTIVATION_BALANCE
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=excess_balance)
|
||||
// )
|
||||
func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bal > params.BeaconConfig().MinActivationBalance {
|
||||
excessBalance := bal - params.BeaconConfig().MinActivationBalance
|
||||
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.AppendPendingBalanceDeposit(idx, excessBalance)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// state.balances[index] = 0
|
||||
// validator = state.validators[index]
|
||||
// validator.effective_balance = 0
|
||||
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=balance)
|
||||
// )
|
||||
//
|
||||
//nolint:dupword
|
||||
func QueueEntireBalanceAndResetValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v.EffectiveBalance = 0
|
||||
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.AppendPendingBalanceDeposit(idx, bal)
|
||||
}
|
||||
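For readers skimming the diff, the spec functions above boil down to simple balance bookkeeping. Below is a minimal, self-contained Go sketch of that arithmetic; it deliberately uses plain slices and an assumed MIN_ACTIVATION_BALANCE constant rather than the Prysm state API, so it is an illustration of the rule, not the implementation.

```go
package main

import "fmt"

// Assumed value for illustration: mainnet MIN_ACTIVATION_BALANCE (32 ETH, in gwei).
const minActivationBalance uint64 = 32_000_000_000

type pendingDeposit struct {
	index  int
	amount uint64
}

// queueExcess models queue_excess_active_balance: any balance above
// MIN_ACTIVATION_BALANCE is moved from the balance array into the
// pending-balance-deposit queue.
func queueExcess(balances []uint64, pending []pendingDeposit, idx int) []pendingDeposit {
	if balances[idx] > minActivationBalance {
		excess := balances[idx] - minActivationBalance
		balances[idx] = minActivationBalance
		pending = append(pending, pendingDeposit{index: idx, amount: excess})
	}
	return pending
}

func main() {
	balances := []uint64{minActivationBalance + 1_000}
	var pending []pendingDeposit
	pending = queueExcess(balances, pending, 0)
	fmt.Println(balances[0], pending) // 32000000000 [{0 1000}]
}
```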
90
beacon-chain/core/electra/validator_test.go
Normal file
@@ -0,0 +1,90 @@
package electra_test

import (
"bytes"
"context"
"testing"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestSwitchToCompoundingValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{}, // No withdrawal credentials
},
{
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
},
{
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
},
})
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
require.NoError(t, err)
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(context.TODO(), s, 0))

// Test that a validator with withdrawal credentials can be switched to compounding.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 1))
v, err := s.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
// val_1 Balance is not changed
b, err := s.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
pbd, err := s.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")

// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 2))
b, err = s.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
pbd, err = s.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
require.Equal(t, uint64(100_000), pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(2), pbd[0].Index, "pending balance deposit index is incorrect")
}

func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance + 100_000,
ActivationEligibilityEpoch: primitives.Epoch(100),
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance + 100_000,
},
})
require.NoError(t, err)
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(context.TODO(), s, 0))
b, err := s.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), b, "balance was not changed")
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), v.EffectiveBalance, "effective balance was not reset")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, v.ActivationEligibilityEpoch, "activation eligibility epoch was not reset")
pbd, err := s.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
}
@@ -99,7 +99,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
activationEligibilityEpoch := time.CurrentEpoch(state) + 1
|
||||
for idx, validator := range vals {
|
||||
// Process the validators for activation eligibility.
|
||||
if helpers.IsEligibleForActivationQueue(validator) {
|
||||
if helpers.IsEligibleForActivationQueue(validator, currentEpoch) {
|
||||
validator.ActivationEligibilityEpoch = activationEligibilityEpoch
|
||||
if err := state.UpdateValidatorAtIndex(primitives.ValidatorIndex(idx), validator); err != nil {
|
||||
return nil, err
|
||||
@@ -474,7 +474,7 @@ func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconSt
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -54,11 +54,11 @@ func ProcessAttestations(
|
||||
return nil, nil, errors.Wrap(err, "could not check validator attested previous epoch")
|
||||
}
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(a, committee)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -211,7 +211,7 @@ func TestProcessAttestations(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestation.AttestingIndices(att1.AggregationBits, committee)
|
||||
indices, err := attestation.AttestingIndices(att1, committee)
|
||||
require.NoError(t, err)
|
||||
for _, i := range indices {
|
||||
if !pVals[i].IsPrevEpochAttester {
|
||||
@@ -220,7 +220,7 @@ func TestProcessAttestations(t *testing.T) {
|
||||
}
|
||||
committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err = attestation.AttestingIndices(att2.AggregationBits, committee)
|
||||
indices, err = attestation.AttestingIndices(att2, committee)
|
||||
require.NoError(t, err)
|
||||
for _, i := range indices {
|
||||
assert.Equal(t, true, pVals[i].IsPrevEpochAttester, "Not a prev epoch attester")
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,6 +3,7 @@ package operation
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -37,7 +38,7 @@ const (
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
type UnAggregatedAttReceivedData struct {
|
||||
// Attestation is the unaggregated attestation object.
|
||||
Attestation *ethpb.Attestation
|
||||
Attestation interfaces.Attestation
|
||||
}
|
||||
|
||||
// AggregatedAttReceivedData is the data sent with AggregatedAttReceived events.
|
||||
@@ -75,5 +76,5 @@ type ProposerSlashingReceivedData struct {
|
||||
|
||||
// AttesterSlashingReceivedData is the data sent with AttesterSlashingReceived events.
|
||||
type AttesterSlashingReceivedData struct {
|
||||
AttesterSlashing *ethpb.AttesterSlashing
|
||||
AttesterSlashing interfaces.AttesterSlashing
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
"rewards_penalties.go",
|
||||
"shuffle.go",
|
||||
"sync_committee.go",
|
||||
"validator_churn.go",
|
||||
"validators.go",
|
||||
"weak_subjectivity.go",
|
||||
],
|
||||
@@ -24,6 +25,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
@@ -56,6 +58,7 @@ go_test(
|
||||
"rewards_penalties_test.go",
|
||||
"shuffle_test.go",
|
||||
"sync_committee_test.go",
|
||||
"validator_churn_test.go",
|
||||
"validators_test.go",
|
||||
"weak_subjectivity_test.go",
|
||||
],
|
||||
@@ -70,10 +73,12 @@ go_test(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -21,20 +22,20 @@ var (
|
||||
// ValidateNilAttestation checks if any composite field of input attestation is nil.
|
||||
// Access to these nil fields will result in a run-time panic,
// so it is recommended to run these checks as a first line of defense.
|
||||
func ValidateNilAttestation(attestation *ethpb.Attestation) error {
|
||||
func ValidateNilAttestation(attestation interfaces.Attestation) error {
|
||||
if attestation == nil {
|
||||
return errors.New("attestation can't be nil")
|
||||
}
|
||||
if attestation.Data == nil {
|
||||
if attestation.GetData() == nil {
|
||||
return errors.New("attestation's data can't be nil")
|
||||
}
|
||||
if attestation.Data.Source == nil {
|
||||
if attestation.GetData().Source == nil {
|
||||
return errors.New("attestation's source can't be nil")
|
||||
}
|
||||
if attestation.Data.Target == nil {
|
||||
if attestation.GetData().Target == nil {
|
||||
return errors.New("attestation's target can't be nil")
|
||||
}
|
||||
if attestation.AggregationBits == nil {
|
||||
if attestation.GetAggregationBits() == nil {
|
||||
return errors.New("attestation's bitfield can't be nil")
|
||||
}
|
||||
return nil
|
||||
@@ -71,8 +72,8 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
|
||||
|
||||
// IsAggregated returns true if the attestation is an aggregated attestation,
|
||||
// false otherwise.
|
||||
func IsAggregated(attestation *ethpb.Attestation) bool {
|
||||
return attestation.AggregationBits.Count() > 1
|
||||
func IsAggregated(attestation interfaces.Attestation) bool {
|
||||
return attestation.GetAggregationBits().Count() > 1
|
||||
}
|
||||
|
||||
// ComputeSubnetForAttestation returns the subnet for which the provided attestation will be broadcasted to.
|
||||
@@ -90,8 +91,8 @@ func IsAggregated(attestation *ethpb.Attestation) bool {
|
||||
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
|
||||
//
|
||||
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
|
||||
func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation) uint64 {
|
||||
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.Data.CommitteeIndex, att.Data.Slot)
|
||||
func ComputeSubnetForAttestation(activeValCount uint64, att interfaces.Attestation) uint64 {
|
||||
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
|
||||
}
|
||||
|
||||
// ComputeSubnetFromCommitteeAndSlot is a flattened version of ComputeSubnetForAttestation where we only pass in
|
||||
|
||||
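The subnet computation quoted in the comment above is pure modular arithmetic. A small standalone sketch, assuming the mainnet values SLOTS_PER_EPOCH = 32 and ATTESTATION_SUBNET_COUNT = 64 (treat these as assumptions, not part of this change):

```go
package main

import "fmt"

// Assumed mainnet values for illustration.
const (
	slotsPerEpoch          uint64 = 32
	attestationSubnetCount uint64 = 64
)

// computeSubnet mirrors the quoted pseudocode: the subnet is the number of
// committees elapsed since the start of the epoch, plus the committee index,
// modulo the subnet count.
func computeSubnet(committeesPerSlot, committeeIndex, slot uint64) uint64 {
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}

func main() {
	fmt.Println(computeSubnet(4, 2, 33)) // (4*1 + 2) % 64 = 6
}
```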
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
@@ -238,7 +239,7 @@ func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
func TestValidateNilAttestation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
attestation *ethpb.Attestation
|
||||
attestation interfaces.Attestation
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -15,12 +15,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -257,8 +257,8 @@ func VerifyBitfieldLength(bf bitfield.Bitfield, committeeSize uint64) error {
|
||||
|
||||
// VerifyAttestationBitfieldLengths verifies that an attestations aggregation bitfields is
|
||||
// a valid length matching the size of the committee.
|
||||
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att *ethpb.Attestation) error {
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att interfaces.Attestation) error {
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve beacon committees")
|
||||
}
|
||||
@@ -267,7 +267,7 @@ func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyB
|
||||
return errors.New("no committee exist for this attestation")
|
||||
}
|
||||
|
||||
if err := VerifyBitfieldLength(att.AggregationBits, uint64(len(committee))); err != nil {
|
||||
if err := VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
|
||||
return errors.Wrap(err, "failed to verify aggregation bitfield")
|
||||
}
|
||||
return nil
|
||||
@@ -295,6 +295,21 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
|
||||
return UnshuffleList(indices, seed)
|
||||
}
|
||||
|
||||
// CommitteeIndices return beacon committee indices corresponding to bits that are set on the argument bitfield.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_committee_indices(committee_bits: Bitvector) -> Sequence[CommitteeIndex]:
|
||||
// return [CommitteeIndex(index) for index, bit in enumerate(committee_bits) if bit]
|
||||
func CommitteeIndices(committeeBits bitfield.Bitfield) []primitives.CommitteeIndex {
|
||||
indices := committeeBits.BitIndices()
|
||||
committeeIndices := make([]primitives.CommitteeIndex, len(indices))
|
||||
for i, ix := range indices {
|
||||
committeeIndices[i] = primitives.CommitteeIndex(uint64(ix))
|
||||
}
|
||||
return committeeIndices
|
||||
}
|
||||
|
||||
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
|
||||
// list with committee index and epoch number. It caches the shuffled indices for the input epoch.
|
||||
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, e primitives.Epoch) error {
|
||||
|
||||
@@ -699,3 +699,12 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
}
|
||||
assert.DeepEqual(t, wantedProposerIndices, proposerIndices, "Did not precompute proposer indices correctly")
|
||||
}
|
||||
|
||||
func TestCommitteeIndices(t *testing.T) {
|
||||
bitfield := bitfield.NewBitvector4()
|
||||
bitfield.SetBitAt(0, true)
|
||||
bitfield.SetBitAt(1, true)
|
||||
bitfield.SetBitAt(3, true)
|
||||
indices := helpers.CommitteeIndices(bitfield)
|
||||
assert.DeepEqual(t, []primitives.CommitteeIndex{0, 1, 3}, indices)
|
||||
}
|
||||
|
||||
53
beacon-chain/core/helpers/validator_churn.go
Normal file
@@ -0,0 +1,53 @@
package helpers

import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/math"
)

// BalanceChurnLimit for the current active balance, in gwei.
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
//
// Spec definition:
//
// def get_balance_churn_limit(state: BeaconState) -> Gwei:
// """
// Return the churn limit for the current epoch.
// """
// churn = max(
// MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA,
// get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT
// )
// return churn - churn % EFFECTIVE_BALANCE_INCREMENT
func BalanceChurnLimit(activeBalance math.Gwei) math.Gwei {
churn := max(
params.BeaconConfig().MinPerEpochChurnLimitElectra,
(uint64(activeBalance) / params.BeaconConfig().ChurnLimitQuotient),
)
return math.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
}

// ActivationExitChurnLimit for the current active balance, in gwei.
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
//
// Spec definition:
//
// def get_activation_exit_churn_limit(state: BeaconState) -> Gwei:
// """
// Return the churn limit for the current epoch dedicated to activations and exits.
// """
// return min(MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT, get_balance_churn_limit(state))
func ActivationExitChurnLimit(activeBalance math.Gwei) math.Gwei {
return min(math.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit), BalanceChurnLimit(activeBalance))
}

// ConsolidationChurnLimit for the current active balance, in gwei.
// New in EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
//
// Spec definition:
//
// def get_consolidation_churn_limit(state: BeaconState) -> Gwei:
// return get_balance_churn_limit(state) - get_activation_exit_churn_limit(state)
func ConsolidationChurnLimit(activeBalance math.Gwei) math.Gwei {
return BalanceChurnLimit(activeBalance) - ActivationExitChurnLimit(activeBalance)
}
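The three churn-limit helpers above are pure functions of the total active balance. A standalone sketch of the same arithmetic, using assumed mainnet preset values (128 ETH minimum churn, a 256 ETH activation/exit cap, quotient 65536, 1 ETH increment); the constants are assumptions for illustration only:

```go
package main

import "fmt"

// Assumed mainnet preset values for illustration (all in gwei).
const (
	minPerEpochChurnLimitElectra        uint64 = 128_000_000_000
	maxPerEpochActivationExitChurnLimit uint64 = 256_000_000_000
	churnLimitQuotient                  uint64 = 65_536
	effectiveBalanceIncrement           uint64 = 1_000_000_000
)

// balanceChurnLimit mirrors get_balance_churn_limit: apply a floor, then round
// down to a whole effective-balance increment.
func balanceChurnLimit(totalActiveBalance uint64) uint64 {
	churn := totalActiveBalance / churnLimitQuotient
	if churn < minPerEpochChurnLimitElectra {
		churn = minPerEpochChurnLimitElectra
	}
	return churn - churn%effectiveBalanceIncrement
}

// activationExitChurnLimit caps the balance churn at the activation/exit limit.
func activationExitChurnLimit(totalActiveBalance uint64) uint64 {
	if limit := balanceChurnLimit(totalActiveBalance); limit < maxPerEpochActivationExitChurnLimit {
		return limit
	}
	return maxPerEpochActivationExitChurnLimit
}

// consolidationChurnLimit is whatever churn is left over for consolidations.
func consolidationChurnLimit(totalActiveBalance uint64) uint64 {
	return balanceChurnLimit(totalActiveBalance) - activationExitChurnLimit(totalActiveBalance)
}

func main() {
	tab := uint64(32_000_000) * 1_000_000_000 // ~32M ETH of active balance, in gwei
	fmt.Println(balanceChurnLimit(tab), activationExitChurnLimit(tab), consolidationChurnLimit(tab))
}
```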
72
beacon-chain/core/helpers/validator_churn_test.go
Normal file
@@ -0,0 +1,72 @@
package helpers_test

import (
"testing"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
)

func TestBalanceChurnLimit(t *testing.T) {
tests := []struct {
name string
activeBalance math.Gwei
expected math.Gwei
}{
{
name: "less than MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA",
activeBalance: 111,
expected: math.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
},
{
name: "modulo EFFECTIVE_BALANCE_INCREMENT",
activeBalance: math.Gwei(111 + params.BeaconConfig().MinPerEpochChurnLimitElectra*params.BeaconConfig().ChurnLimitQuotient),
expected: math.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
},
{
name: "more than MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA",
activeBalance: math.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
expected: math.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expected, helpers.BalanceChurnLimit(tt.activeBalance))
})
}
}

func TestActivationExitChurnLimit(t *testing.T) {
tests := []struct {
name string
activeBalance math.Gwei
expected math.Gwei
}{
{
name: "less than MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT",
activeBalance: 1,
expected: math.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
},
{
name: "more than MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT",
activeBalance: math.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
expected: math.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expected, helpers.ActivationExitChurnLimit(tt.activeBalance))
})
}
}

// FuzzConsolidationChurnLimit exercises BalanceChurnLimit and ActivationExitChurnLimit
func FuzzConsolidationChurnLimit(f *testing.F) {
f.Fuzz(func(t *testing.T, activeBalance uint64) {
helpers.ConsolidationChurnLimit(math.Gwei(activeBalance))
})
}
@@ -12,6 +12,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -393,6 +394,24 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primi
|
||||
// IsEligibleForActivationQueue checks if the validator is eligible to
|
||||
// be placed into the activation queue.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_eligible_for_activation_queue(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` is eligible to be placed into the activation queue.
|
||||
// """
|
||||
// return (
|
||||
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
|
||||
// and validator.effective_balance >= MIN_ACTIVATION_BALANCE # [Modified in Electra:EIP7251]
|
||||
// )
|
||||
func IsEligibleForActivationQueue(validator *ethpb.Validator, currentEpoch primitives.Epoch) bool {
|
||||
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return isEligibleForActivationQueueElectra(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
}
|
||||
return isEligibleForActivationQueue(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
}
|
||||
|
||||
// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def is_eligible_for_activation_queue(validator: Validator) -> bool:
|
||||
@@ -403,22 +422,29 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primi
|
||||
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
|
||||
// and validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||
// )
|
||||
func IsEligibleForActivationQueue(validator *ethpb.Validator) bool {
|
||||
return isEligibileForActivationQueue(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
}
|
||||
|
||||
// IsEligibleForActivationQueueUsingTrie checks if the read-only validator is eligible to
|
||||
// be placed into the activation queue.
|
||||
func IsEligibleForActivationQueueUsingTrie(validator state.ReadOnlyValidator) bool {
|
||||
return isEligibileForActivationQueue(validator.ActivationEligibilityEpoch(), validator.EffectiveBalance())
|
||||
}
|
||||
|
||||
// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue*
|
||||
func isEligibileForActivationQueue(activationEligibilityEpoch primitives.Epoch, effectiveBalance uint64) bool {
|
||||
func isEligibleForActivationQueue(activationEligibilityEpoch primitives.Epoch, effectiveBalance uint64) bool {
|
||||
return activationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch &&
|
||||
effectiveBalance == params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
// IsEligibleForActivationQueue checks if the validator is eligible to
|
||||
// be placed into the activation queue.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_eligible_for_activation_queue(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` is eligible to be placed into the activation queue.
|
||||
// """
|
||||
// return (
|
||||
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
|
||||
// and validator.effective_balance >= MIN_ACTIVATION_BALANCE # [Modified in Electra:EIP7251]
|
||||
// )
|
||||
func isEligibleForActivationQueueElectra(activationEligibilityEpoch primitives.Epoch, effectiveBalance uint64) bool {
|
||||
return activationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch &&
|
||||
effectiveBalance >= params.BeaconConfig().MinActivationBalance
|
||||
}
|
||||
|
||||
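The eligibility change above is the core Electra tweak: the balance condition relaxes from an exact MAX_EFFECTIVE_BALANCE match to a MIN_ACTIVATION_BALANCE floor. A standalone sketch of both branches, with assumed 32 ETH constants (not the Prysm API):

```go
package main

import (
	"fmt"
	"math"
)

// Assumed values for illustration (gwei); pre-Electra MAX_EFFECTIVE_BALANCE and
// Electra MIN_ACTIVATION_BALANCE are both 32 ETH on mainnet.
const (
	farFutureEpoch       uint64 = math.MaxUint64
	maxEffectiveBalance  uint64 = 32_000_000_000
	minActivationBalance uint64 = 32_000_000_000
)

// eligibleForActivationQueue models the fork-aware check above: before Electra
// the effective balance must equal MAX_EFFECTIVE_BALANCE exactly, afterwards it
// only has to reach MIN_ACTIVATION_BALANCE.
func eligibleForActivationQueue(activationEligibilityEpoch, effectiveBalance uint64, postElectra bool) bool {
	if activationEligibilityEpoch != farFutureEpoch {
		return false
	}
	if postElectra {
		return effectiveBalance >= minActivationBalance
	}
	return effectiveBalance == maxEffectiveBalance
}

func main() {
	fmt.Println(eligibleForActivationQueue(farFutureEpoch, 40_000_000_000, true))  // true: over the floor
	fmt.Println(eligibleForActivationQueue(farFutureEpoch, 40_000_000_000, false)) // false: not an exact match
}
```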
// IsEligibleForActivation checks if the validator is eligible for activation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
@@ -471,3 +497,245 @@ func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconSta
|
||||
}
|
||||
return lastActivatedvalidatorIndex, nil
|
||||
}
|
||||
|
||||
// HasETH1WithdrawalCredential returns whether the validator has an ETH1
// withdrawal prefix. It assumes that the caller has a lock on the state.
|
||||
func HasETH1WithdrawalCredential(val *ethpb.Validator) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
return isETH1WithdrawalCredential(val.WithdrawalCredentials)
|
||||
}
|
||||
|
||||
func isETH1WithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
// HasCompoundingWithdrawalCredential checks if the validator has a compounding withdrawal credential.
|
||||
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def has_compounding_withdrawal_credential(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential.
|
||||
// """
|
||||
// return is_compounding_withdrawal_credential(validator.withdrawal_credentials)
|
||||
func HasCompoundingWithdrawalCredential(v interfaces.WithWithdrawalCredentials) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return isCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
// isCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool:
|
||||
// return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX
|
||||
func isCompoundingWithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
// HasExecutionWithdrawalCredentials checks if the validator has an execution withdrawal credential or compounding credential.
|
||||
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def has_execution_withdrawal_credential(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential.
|
||||
// """
|
||||
// return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
|
||||
func HasExecutionWithdrawalCredentials(v *ethpb.Validator) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return HasCompoundingWithdrawalCredential(v) || HasETH1WithdrawalCredential(v)
|
||||
}
|
||||
|
||||
// IsSameWithdrawalCredentials returns true if both validators have the same withdrawal credentials.
|
||||
//
|
||||
// return a.withdrawal_credentials[12:] == b.withdrawal_credentials[12:]
|
||||
func IsSameWithdrawalCredentials(a, b *ethpb.Validator) bool {
|
||||
if a == nil || b == nil {
|
||||
return false
|
||||
}
|
||||
if len(a.WithdrawalCredentials) <= 12 || len(b.WithdrawalCredentials) <= 12 {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(a.WithdrawalCredentials[12:], b.WithdrawalCredentials[12:])
|
||||
}
|
||||
|
||||
// IsFullyWithdrawableValidator returns whether the validator is able to perform a full
|
||||
// withdrawal. This function assumes that the caller holds a lock on the state.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` is fully withdrawable.
|
||||
// """
|
||||
// return (
|
||||
// has_execution_withdrawal_credential(validator) # [Modified in Electra:EIP7251]
|
||||
// and validator.withdrawable_epoch <= epoch
|
||||
// and balance > 0
|
||||
// )
|
||||
func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
if val == nil || balance <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Electra / EIP-7251 logic
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch <= epoch
|
||||
}
|
||||
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch <= epoch
|
||||
}
|
||||
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
// partial withdrawal. This function assumes that the caller has a lock on the state.
|
||||
// This method conditionally calls the fork appropriate implementation based on the epoch argument.
|
||||
func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if epoch < params.BeaconConfig().ElectraForkEpoch {
|
||||
return isPartiallyWithdrawableValidatorCapella(val, balance, epoch)
|
||||
}
|
||||
|
||||
return isPartiallyWithdrawableValidatorElectra(val, balance, epoch)
|
||||
}
|
||||
|
||||
// isPartiallyWithdrawableValidatorElectra implements is_partially_withdrawable_validator in the
|
||||
// electra fork.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool:
|
||||
//
|
||||
// """
|
||||
// Check if ``validator`` is partially withdrawable.
|
||||
// """
|
||||
// max_effective_balance = get_validator_max_effective_balance(validator)
|
||||
// has_max_effective_balance = validator.effective_balance == max_effective_balance # [Modified in Electra:EIP7251]
|
||||
// has_excess_balance = balance > max_effective_balance # [Modified in Electra:EIP7251]
|
||||
// return (
|
||||
// has_execution_withdrawal_credential(validator) # [Modified in Electra:EIP7251]
|
||||
// and has_max_effective_balance
|
||||
// and has_excess_balance
|
||||
// )
|
||||
func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
maxEB := ValidatorMaxEffectiveBalance(val)
|
||||
hasMaxBalance := val.EffectiveBalance == maxEB
|
||||
hasExcessBalance := balance > maxEB
|
||||
|
||||
return HasExecutionWithdrawalCredentials(val) &&
|
||||
hasMaxBalance &&
|
||||
hasExcessBalance
|
||||
}
|
||||
|
||||
// isPartiallyWithdrawableValidatorCapella implements is_partially_withdrawable_validator in the
|
||||
// capella fork.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` is partially withdrawable.
|
||||
// """
|
||||
// has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||
// has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
|
||||
// return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
|
||||
func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance == params.BeaconConfig().MaxEffectiveBalance
|
||||
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
|
||||
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
|
||||
}
|
||||
|
||||
// ValidatorMaxEffectiveBalance returns the maximum effective balance for a validator.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def get_validator_max_effective_balance(validator: Validator) -> Gwei:
|
||||
// """
|
||||
// Get max effective balance for ``validator``.
|
||||
// """
|
||||
// if has_compounding_withdrawal_credential(validator):
|
||||
// return MAX_EFFECTIVE_BALANCE_ELECTRA
|
||||
// else:
|
||||
// return MIN_ACTIVATION_BALANCE
|
||||
func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
|
||||
if HasCompoundingWithdrawalCredential(val) {
|
||||
return params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
return params.BeaconConfig().MinActivationBalance
|
||||
}
|
||||
|
||||
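The max-effective-balance rule above depends only on the first byte of the withdrawal credentials. A standalone sketch, with the 0x02 prefix and the 2048 ETH / 32 ETH ceilings assumed from the spec presets:

```go
package main

import "fmt"

// Assumed values for illustration: the 0x02 compounding prefix and the
// mainnet ceilings (2048 ETH for compounding, 32 ETH otherwise), in gwei.
const (
	compoundingWithdrawalPrefix byte   = 0x02
	maxEffectiveBalanceElectra  uint64 = 2_048_000_000_000
	minActivationBalance        uint64 = 32_000_000_000
)

// maxEffectiveBalance mirrors get_validator_max_effective_balance: only an
// 0x02 "compounding" credential unlocks the higher Electra ceiling.
func maxEffectiveBalance(withdrawalCredentials []byte) uint64 {
	if len(withdrawalCredentials) > 0 && withdrawalCredentials[0] == compoundingWithdrawalPrefix {
		return maxEffectiveBalanceElectra
	}
	return minActivationBalance
}

func main() {
	fmt.Println(maxEffectiveBalance([]byte{0x02, 0xCC})) // 2048000000000
	fmt.Println(maxEffectiveBalance([]byte{0x01, 0xCC})) // 32000000000
}
```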
// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// if balance > MIN_ACTIVATION_BALANCE:
|
||||
// excess_balance = balance - MIN_ACTIVATION_BALANCE
|
||||
// state.balances[index] = MIN_ACTIVATION_BALANCE
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=excess_balance)
|
||||
// )
|
||||
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bal > params.BeaconConfig().MinActivationBalance {
|
||||
excessBalance := bal - params.BeaconConfig().MinActivationBalance
|
||||
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.AppendPendingBalanceDeposit(idx, excessBalance)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// validator = state.validators[index]
|
||||
// state.balances[index] = 0
|
||||
// validator.effective_balance = 0
|
||||
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=balance)
|
||||
// )
|
||||
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v.EffectiveBalance = 0
|
||||
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.AppendPendingBalanceDeposit(idx, bal)
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestIsActiveValidator_OK(t *testing.T) {
|
||||
@@ -703,25 +704,47 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
|
||||
func TestIsEligibleForActivationQueue(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
want bool
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
currentEpoch primitives.Epoch
|
||||
want bool
|
||||
}{
|
||||
{"Eligible",
|
||||
ðpb.Validator{ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
true},
|
||||
{"Incorrect activation eligibility epoch",
|
||||
ðpb.Validator{ActivationEligibilityEpoch: 1, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
false},
|
||||
{"Not enough balance",
|
||||
ðpb.Validator{ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: 1},
|
||||
false},
|
||||
{
name: "Eligible",
validator: &ethpb.Validator{ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
currentEpoch: primitives.Epoch(params.BeaconConfig().ElectraForkEpoch - 1),
want: true,
},
{
name: "Incorrect activation eligibility epoch",
validator: &ethpb.Validator{ActivationEligibilityEpoch: 1, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
currentEpoch: primitives.Epoch(params.BeaconConfig().ElectraForkEpoch - 1),
want: false,
},
{
name: "Not enough balance",
validator: &ethpb.Validator{ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: 1},
currentEpoch: primitives.Epoch(params.BeaconConfig().ElectraForkEpoch - 1),
want: false,
},
{
name: "More than max effective balance before electra",
validator: &ethpb.Validator{ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance + 1},
currentEpoch: primitives.Epoch(params.BeaconConfig().ElectraForkEpoch - 1),
want: false,
},
{
name: "More than min activation balance after electra",
validator: &ethpb.Validator{ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MinActivationBalance + 1},
currentEpoch: primitives.Epoch(params.BeaconConfig().ElectraForkEpoch),
want: true,
},
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
|
||||
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator, tt.currentEpoch), "IsEligibleForActivationQueue()")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -828,3 +851,309 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ids[5], id)
|
||||
}
|
||||
|
||||
func TestHasETH1WithdrawalCredentials(t *testing.T) {
|
||||
creds := []byte{0xFA, 0xCC}
|
||||
v := &ethpb.Validator{WithdrawalCredentials: creds}
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
creds = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}
v = &ethpb.Validator{WithdrawalCredentials: creds}
require.Equal(t, true, helpers.HasETH1WithdrawalCredential(v))
// No Withdrawal cred
v = &ethpb.Validator{}
|
||||
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
|
||||
}
|
||||
|
||||
func TestHasCompoundingWithdrawalCredential(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
want bool
|
||||
}{
|
||||
{"Has compounding withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}, 32)},
|
||||
true},
|
||||
{"Does not have compounding withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
|
||||
false},
|
||||
{"Handles nil case", nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.HasCompoundingWithdrawalCredential(tt.validator))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasExecutionWithdrawalCredentials(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
want bool
|
||||
}{
|
||||
{"Has compounding withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}, 32)},
|
||||
true},
|
||||
{"Has eth1 withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte}, 32)},
|
||||
true},
|
||||
{"Does not have compounding withdrawal credential or eth1 withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
|
||||
false},
|
||||
{"Handles nil case", nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.HasExecutionWithdrawalCredentials(tt.validator))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
balance uint64
|
||||
epoch primitives.Epoch
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Handles nil case",
|
||||
validator: nil,
|
||||
balance: 0,
|
||||
epoch: 0,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No ETH1 prefix",
|
||||
validator: &ethpb.Validator{
|
||||
WithdrawalCredentials: []byte{0xFA, 0xCC},
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
epoch: 3,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Wrong withdrawable epoch",
|
||||
validator: &ethpb.Validator{
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC},
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
epoch: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No balance",
|
||||
validator: &ethpb.Validator{
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC},
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
balance: 0,
|
||||
epoch: 3,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Fully withdrawable",
|
||||
validator: &ethpb.Validator{
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC},
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
epoch: 3,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Fully withdrawable compounding validator electra",
|
||||
validator: &ethpb.Validator{
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0xCC},
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(tt.validator, tt.balance, tt.epoch))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
balance uint64
|
||||
epoch primitives.Epoch
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Handles nil case",
|
||||
validator: nil,
|
||||
balance: 0,
|
||||
epoch: 0,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No ETH1 prefix",
|
||||
validator: &ethpb.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawalCredentials: []byte{0xFA, 0xCC},
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
epoch: 3,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No balance",
|
||||
validator: &ethpb.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC},
|
||||
},
|
||||
balance: 0,
|
||||
epoch: 3,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Partially withdrawable",
|
||||
validator: &ethpb.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC},
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalance * 2,
|
||||
epoch: 3,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Fully withdrawable vanilla validator electra",
|
||||
validator: &ethpb.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC},
|
||||
},
|
||||
balance: params.BeaconConfig().MinActivationBalance * 2,
|
||||
epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Fully withdrawable compounding validator electra",
|
||||
validator: &ethpb.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0xCC},
|
||||
},
|
||||
balance: params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
|
||||
epoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(tt.validator, tt.balance, tt.epoch))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSameWithdrawalCredentials(t *testing.T) {
|
||||
makeWithdrawalCredentials := func(address []byte) []byte {
|
||||
b := make([]byte, 12)
|
||||
return append(b, address...)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Validator
|
||||
b *ethpb.Validator
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
"Same credentials",
|
||||
&ethpb.Validator{WithdrawalCredentials: makeWithdrawalCredentials([]byte("same"))},
&ethpb.Validator{WithdrawalCredentials: makeWithdrawalCredentials([]byte("same"))},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"Different credentials",
|
||||
&ethpb.Validator{WithdrawalCredentials: makeWithdrawalCredentials([]byte("foo"))},
&ethpb.Validator{WithdrawalCredentials: makeWithdrawalCredentials([]byte("bar"))},
|
||||
false,
|
||||
},
|
||||
{"Handles nil case", nil, nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsSameWithdrawalCredentials(tt.a, tt.b))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatorMaxEffectiveBalance(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
validator *ethpb.Validator
|
||||
want uint64
|
||||
}{
|
||||
{
|
||||
name: "Compounding withdrawal credential",
|
||||
validator: &ethpb.Validator{WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0xCC}},
|
||||
want: params.BeaconConfig().MaxEffectiveBalanceElectra,
|
||||
},
|
||||
{
|
||||
name: "Vanilla credentials",
|
||||
validator: &ethpb.Validator{WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}},
|
||||
want: params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
{
|
||||
"Handles nil case",
|
||||
nil,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(tt.validator))
|
||||
})
|
||||
}
|
||||
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
|
||||
assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
|
||||
func TestQueueExcessActiveBalance_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
err := helpers.QueueExcessActiveBalance(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount)
|
||||
|
||||
bals = st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
|
||||
}
|
||||
|
||||
func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
val, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance)
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(pbd))
|
||||
err = helpers.QueueEntireBalanceAndResetValidator(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err = st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd))
|
||||
|
||||
val, err = st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), val.EffectiveBalance)
|
||||
}
|
||||
|
||||
@@ -90,6 +90,15 @@ func CanUpgradeToDeneb(slot primitives.Slot) bool {
|
||||
return epochStart && DenebEpoch
|
||||
}
|
||||
|
||||
// CanUpgradeToElectra returns true if the input `slot` can upgrade to Electra.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ELECTRA_FORK_EPOCH
|
||||
func CanUpgradeToElectra(slot primitives.Slot) bool {
|
||||
epochStart := slots.IsEpochStart(slot)
|
||||
electraEpoch := slots.ToEpoch(slot) == params.BeaconConfig().ElectraForkEpoch
|
||||
return epochStart && electraEpoch
|
||||
}
|
||||
|
||||
// CanProcessEpoch checks the eligibility to process epoch.
|
||||
// The epoch can be processed at the end of the last slot of every epoch.
|
||||
//
|
||||
|
||||
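The fork-boundary check above reduces to two conditions on the slot. A standalone sketch, assuming the mainnet SLOTS_PER_EPOCH of 32 and treating the Electra fork epoch as a parameter rather than reading it from the config:

```go
package main

import "fmt"

// Assumed mainnet value for illustration.
const slotsPerEpoch uint64 = 32

// canUpgradeToElectra models the check quoted above: the fork activates only on
// the first slot of the (assumed, configurable) Electra fork epoch.
func canUpgradeToElectra(slot, electraForkEpoch uint64) bool {
	isEpochStart := slot%slotsPerEpoch == 0
	return isEpochStart && slot/slotsPerEpoch == electraForkEpoch
}

func main() {
	fmt.Println(canUpgradeToElectra(5*slotsPerEpoch, 5))   // true: first slot of epoch 5
	fmt.Println(canUpgradeToElectra(5*slotsPerEpoch+1, 5)) // false: not an epoch start
	fmt.Println(canUpgradeToElectra(4*slotsPerEpoch, 5))   // false: wrong epoch
}
```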
@@ -333,3 +333,38 @@ func TestCanUpgradeToDeneb(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanUpgradeToElectra(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bc)
|
||||
tests := []struct {
|
||||
name string
|
||||
slot primitives.Slot
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "not epoch start",
|
||||
slot: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "not electra epoch",
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "electra epoch",
|
||||
slot: primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := time.CanUpgradeToElectra(tt.slot); got != tt.want {
|
||||
t.Errorf("CanUpgradeToElectra() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/capella:go_default_library",
|
||||
"//beacon-chain/core/deneb:go_default_library",
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/capella"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/deneb"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/execution"
|
||||
@@ -320,6 +321,14 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToElectra(state.Slot()) {
|
||||
state, err = electra.UpgradeToElectra(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -257,7 +257,7 @@ func ProcessOperationsNoVerifyAttsSigs(
if err != nil {
return nil, err
}
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
case version.Altair, version.Bellatrix, version.Capella, version.Deneb, version.Electra:
state, err = altairOperations(ctx, state, beaconBlock)
if err != nil {
return nil, err

@@ -311,7 +311,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,

committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, blockAtt.Data.Slot, blockAtt.Data.CommitteeIndex)
assert.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(blockAtt.AggregationBits, committee)
attestingIndices, err := attestation.AttestingIndices(blockAtt, committee)
require.NoError(t, err)
assert.NoError(t, err)
hashTreeRoot, err = signing.ComputeSigningRoot(blockAtt.Data, domain)

@@ -651,6 +651,20 @@ func TestProcessSlots_ThroughDenebEpoch(t *testing.T) {
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}

func TestProcessSlots_ThroughElectraEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf.ElectraForkEpoch = 5
params.OverrideBeaconConfig(conf)

st, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
require.NoError(t, err)
require.Equal(t, version.Electra, st.Version())
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}

func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
r := []byte{'a'}

@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["validator.go"],
srcs = [
"slashing.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators",
visibility = [
"//beacon-chain:__subpackages__",
@@ -15,7 +18,9 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
@@ -24,17 +29,22 @@ go_library(
go_test(
name = "go_default_test",
size = "small",
srcs = ["validator_test.go"],
embed = [":go_default_library"],
srcs = [
"slashing_test.go",
"validator_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
],
)

beacon-chain/core/validators/slashing.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package validators

import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// SlashingParamsPerVersion returns the slashing parameters for the given state version.
func SlashingParamsPerVersion(v int) (slashingQuotient, proposerRewardQuotient, whistleblowerRewardQuotient uint64, err error) {
cfg := params.BeaconConfig()
switch v {
case version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Bellatrix, version.Capella, version.Deneb:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Electra:
slashingQuotient = cfg.MinSlashingPenaltyQuotientElectra
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotientElectra
default:
err = errors.New("unknown state version")
}
return
}
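A rough usage sketch of the new helper, using the import paths shown in the diff; the concrete quotient values come from the active BeaconConfig at runtime, so the printed numbers are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

func main() {
	// Look up the slashing quotients that apply to an Electra state.
	sq, pq, wq, err := validators.SlashingParamsPerVersion(version.Electra)
	if err != nil {
		panic(err)
	}
	effectiveBalance := uint64(32_000_000_000) // 32 ETH in Gwei, as an example input
	fmt.Println("initial slashing penalty:", effectiveBalance/sq)
	fmt.Println("whistleblower reward:    ", effectiveBalance/wq)
	fmt.Println("proposer reward:         ", (effectiveBalance/wq)/pq)
}
```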
beacon-chain/core/validators/slashing_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package validators_test

import (
"testing"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

func TestSlashingParamsPerVersion_NoErrors(t *testing.T) {
for _, v := range version.All() {
_, _, _, err := validators.SlashingParamsPerVersion(v)
if err != nil {
// If this test is failing, you need to add a case for the version in slashingParamsPerVersion.
t.Errorf("Error occurred for version %d: %v", v, err)
}
}
}

@@ -13,7 +13,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

@@ -43,34 +45,26 @@ func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, c

// InitiateValidatorExit takes in validator index and updates
// validator with correct voluntary exit parameters.
// Note: As of Electra, the exitQueueEpoch and churn parameters are unused.
//
// Spec pseudocode definition:
//
// def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
// """
// Initiate the exit of the validator with index ``index``.
// """
// # Return if validator already initiated exit
// validator = state.validators[index]
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// """
// Initiate the exit of the validator with index ``index``.
// """
// # Return if validator already initiated exit
// validator = state.validators[index]
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Compute exit queue epoch
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
// # Compute exit queue epoch [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
//
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, 0, err
@@ -78,14 +72,38 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, validator.ExitEpoch, ErrValidatorAlreadyExited
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)

if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
// Compute exit queue epoch.
if s.Version() < version.Electra {
// Relevant spec code from deneb:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)

if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, 0, err
}
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(math.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, 0, err
}
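The Electra branch above delegates the whole computation to the state method ExitEpochAndUpdateChurn. For reference, a standalone Go paraphrase of the EIP-7251 spec function compute_exit_epoch_and_update_churn that this method is expected to mirror; the struct and parameter names are illustrative, not the real state implementation:

```go
// churnState holds the two EIP-7251 accumulators kept on the beacon state.
// Field names are illustrative only.
type churnState struct {
	earliestExitEpoch    uint64
	exitBalanceToConsume uint64 // Gwei
}

// computeExitEpochAndUpdateChurn paraphrases the EIP-7251 spec:
// minExitEpoch stands in for compute_activation_exit_epoch(current_epoch),
// perEpochChurn for get_activation_exit_churn_limit(state).
func computeExitEpochAndUpdateChurn(st *churnState, exitBalance, perEpochChurn, minExitEpoch uint64) uint64 {
	earliest := st.earliestExitEpoch
	if minExitEpoch > earliest {
		earliest = minExitEpoch
	}
	// A newly reached exit epoch starts with a full epoch of churn available.
	balanceToConsume := st.exitBalanceToConsume
	if st.earliestExitEpoch < earliest {
		balanceToConsume = perEpochChurn
	}
	// If the exiting balance does not fit, push the exit out by whole epochs.
	if exitBalance > balanceToConsume {
		balanceToProcess := exitBalance - balanceToConsume
		additionalEpochs := (balanceToProcess-1)/perEpochChurn + 1
		earliest += additionalEpochs
		balanceToConsume += additionalEpochs * perEpochChurn
	}
	st.exitBalanceToConsume = balanceToConsume - exitBalance
	st.earliestExitEpoch = earliest
	return earliest
}
```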
@@ -102,7 +120,8 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
}

// SlashValidator slashes the malicious validator's balance and awards
// the whistleblower's balance.
// the whistleblower's balance. Note: This implementation does not handle an
// optional whistleblower index. The whistleblower index is always the proposer index.
//
// Spec pseudocode definition:
//
@@ -118,22 +137,22 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
// validator.slashed = True
// validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
// state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
// decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT)
// slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_EIP7251 # [Modified in EIP7251]
// decrease_balance(state, slashed_index, slashing_penalty)
//
// # Apply proposer and whistleblower rewards
// proposer_index = get_beacon_proposer_index(state)
// if whistleblower_index is None:
// whistleblower_index = proposer_index
// whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
// proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
// whistleblower_reward = Gwei(
// validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA) # [Modified in EIP7251]
// proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
// increase_balance(state, proposer_index, proposer_reward)
// increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
func SlashValidator(
ctx context.Context,
s state.BeaconState,
slashedIdx primitives.ValidatorIndex,
penaltyQuotient uint64,
proposerRewardQuotient uint64) (state.BeaconState, error) {
slashedIdx primitives.ValidatorIndex) (state.BeaconState, error) {
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
@@ -161,7 +180,17 @@ func SlashValidator(
); err != nil {
return nil, err
}
if err := helpers.DecreaseBalance(s, slashedIdx, validator.EffectiveBalance/penaltyQuotient); err != nil {

slashingQuotient, proposerRewardQuotient, whistleblowerRewardQuotient, err := SlashingParamsPerVersion(s.Version())
if err != nil {
return nil, errors.Wrap(err, "could not get slashing parameters per version")
}

slashingPenalty, err := math.Div64(validator.EffectiveBalance, slashingQuotient)
if err != nil {
return nil, errors.Wrap(err, "failed to compute slashing slashingPenalty")
|
||||
}
if err := helpers.DecreaseBalance(s, slashedIdx, slashingPenalty); err != nil {
return nil, err
}

@@ -170,14 +199,18 @@ func SlashValidator(
return nil, errors.Wrap(err, "could not get proposer idx")
}
whistleBlowerIdx := proposerIdx
whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
proposerReward := whistleblowerReward / proposerRewardQuotient
err = helpers.IncreaseBalance(s, proposerIdx, proposerReward)
whistleblowerReward, err := math.Div64(validator.EffectiveBalance, whistleblowerRewardQuotient)
if err != nil {
return nil, errors.Wrap(err, "failed to compute whistleblowerReward")
}
proposerReward, err := math.Div64(whistleblowerReward, proposerRewardQuotient)
if err != nil {
return nil, errors.Wrap(err, "failed to compute proposer reward")
}
if err := helpers.IncreaseBalance(s, proposerIdx, proposerReward); err != nil {
return nil, err
}
err = helpers.IncreaseBalance(s, whistleBlowerIdx, whistleblowerReward-proposerReward)
if err != nil {
if err := helpers.IncreaseBalance(s, whistleBlowerIdx, whistleblowerReward-proposerReward); err != nil {
return nil, err
}
return s, nil

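One detail worth noting: the Electra spec comment computes the proposer reward with PROPOSER_WEIGHT // WEIGHT_DENOMINATOR, while the Go code divides the whistleblower reward by a proposer reward quotient. With the mainnet constants I believe apply (weight 8, denominator 64, quotient 8) both are the same 1/8 ratio; the sketch below checks that arithmetic under those assumed values.

```go
package main

import "fmt"

func main() {
	// Assumed mainnet constants, for illustration only.
	const (
		proposerWeight         = 8
		weightDenominator      = 64
		proposerRewardQuotient = 8
	)
	whistleblowerReward := uint64(7_812_500) // e.g. 32e9 Gwei / 4096

	bySpecWeights := whistleblowerReward * proposerWeight / weightDenominator
	byQuotient := whistleblowerReward / proposerRewardQuotient
	fmt.Println(bySpecWeights == byQuotient) // true: both give 1/8 of the reward
}
```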
@@ -1,4 +1,4 @@
package validators
package validators_test

import (
"context"
@@ -6,13 +6,16 @@ import (

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestHasVoted_OK(t *testing.T) {
@@ -48,8 +51,8 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, 0, 199, 1)
require.ErrorIs(t, err, ErrValidatorAlreadyExited)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, 0, 199, 1)
require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited)
require.Equal(t, exitEpoch, epoch)
v, err := newState.ValidatorAtIndex(0)
require.NoError(t, err)
@@ -67,7 +70,7 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
require.NoError(t, err)
require.Equal(t, exitedEpoch+2, epoch)
v, err := newState.ValidatorAtIndex(idx)
@@ -87,7 +90,7 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
require.NoError(t, err)
require.Equal(t, exitedEpoch+3, epoch)

@@ -109,10 +112,58 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, _, err = InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
_, _, err = validators.InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
require.ErrorContains(t, "addition overflows", err)
}

func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
exitedEpoch := primitives.Epoch(100)
idx := primitives.ValidatorIndex(3)
base := &ethpb.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(exitedEpoch + 1),
Validators: []*ethpb.Validator{
{
ExitEpoch: exitedEpoch,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
ExitEpoch: exitedEpoch + 1,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
ExitEpoch: exitedEpoch + 2,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
},
}
state, err := state_native.InitializeFromProtoElectra(base)
require.NoError(t, err)

// Pre-check: Exit balance to consume should be zero.
ebtc, err := state.ExitBalanceToConsume()
require.NoError(t, err)
require.Equal(t, math.Gwei(0), ebtc)

newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
require.NoError(t, err)

// Expect that the exit epoch is the next available epoch with max seed lookahead.
want := helpers.ActivationExitEpoch(exitedEpoch + 1)
require.Equal(t, want, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, want, v.ExitEpoch, "Exit epoch was not the highest")

// Check that the exit balance to consume has been updated on the state.
ebtc, err = state.ExitBalanceToConsume()
require.NoError(t, err)
require.NotEqual(t, math.Gwei(0), ebtc, "Exit balance to consume was not updated")
}

func TestSlashValidator_OK(t *testing.T) {
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
@@ -141,8 +192,7 @@ func TestSlashValidator_OK(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
cfg := params.BeaconConfig()
slashedState, err := SlashValidator(context.Background(), state, slashedIdx, cfg.MinSlashingPenaltyQuotient, cfg.ProposerRewardQuotient)
slashedState, err := validators.SlashValidator(context.Background(), state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Phase0)

@@ -167,6 +217,59 @@ func TestSlashValidator_OK(t *testing.T) {
assert.Equal(t, maxBalance-(v.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotient), bal, "Did not get expected balance for slashed validator")
}

func TestSlashValidator_Electra(t *testing.T) {
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)
for i := 0; i < validatorCount; i++ {
registry = append(registry, &ethpb.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}

base := &ethpb.BeaconStateElectra{
Validators: registry,
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Balances: balances,
}
state, err := state_native.InitializeFromProtoElectra(base)
require.NoError(t, err)

slashedIdx := primitives.ValidatorIndex(3)

proposer, err := helpers.BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(context.Background(), state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Electra)

v, err := state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)
assert.Equal(t, true, v.Slashed, "Validator not slashed despite supposed to being slashed")
assert.Equal(t, time.CurrentEpoch(state)+params.BeaconConfig().EpochsPerSlashingsVector, v.WithdrawableEpoch, "Withdrawable epoch not the expected value")

maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")

whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotientElectra
bal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
// The proposer is the whistleblower.
assert.Equal(t, proposerBal+whistleblowerReward, bal, "Did not get expected balance for proposer")
bal, err = state.BalanceAtIndex(slashedIdx)
require.NoError(t, err)
v, err = state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)
assert.Equal(t, maxBalance-(v.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotientElectra), bal, "Did not get expected balance for slashed validator")
}

func TestActivatedValidatorIndices(t *testing.T) {
tests := []struct {
state *ethpb.BeaconState
@@ -219,7 +322,7 @@ func TestActivatedValidatorIndices(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
activatedIndices := ActivatedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
activatedIndices := validators.ActivatedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
assert.DeepEqual(t, tt.wanted, activatedIndices)
}
}
@@ -273,7 +376,7 @@ func TestSlashedValidatorIndices(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
slashedIndices := SlashedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
slashedIndices := validators.SlashedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
assert.DeepEqual(t, tt.wanted, slashedIndices)
}
}
@@ -335,7 +438,7 @@ func TestExitedValidatorIndices(t *testing.T) {
require.NoError(t, err)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
require.NoError(t, err)
exitedIndices, err := ExitedValidatorIndices(0, tt.state.Validators, activeCount)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
require.NoError(t, err)
assert.DeepEqual(t, tt.wanted, exitedIndices)
}
@@ -410,7 +513,7 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
epoch, churn := MaxExitEpochAndChurn(s)
epoch, churn := validators.MaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
}

@@ -7,6 +7,7 @@ go_library(
"cache.go",
"log.go",
"metrics.go",
"migration.go",
"mock.go",
"pruner.go",
],
@@ -37,6 +38,7 @@ go_test(
srcs = [
"blob_test.go",
"cache_test.go",
"migration_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],

@@ -6,6 +6,7 @@ import (
"math"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
@@ -37,6 +38,7 @@ const (
partExt = "part"

directoryPermissions = 0700
rootPrefixLen = 4
)

// BlobStorageOption is a functional option for configuring a BlobStorage.
@@ -321,8 +323,14 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func (p blobNamer) groupDir() string {
return oneBytePrefix(rootString(p.root))
}

func (p blobNamer) dir() string {
return rootString(p.root)
rs := rootString(p.root)
parentDir := oneBytePrefix(rs)
return filepath.Join(parentDir, rs)
}

func (p blobNamer) partPath(entropy string) string {
@@ -337,6 +345,11 @@ func rootString(root [32]byte) string {
return fmt.Sprintf("%#x", root)
}

func oneBytePrefix(p string) string {
// returns eg 0x00 from 0x0002fb4db510b8618b04dc82d023793739c26346a8b02eb73482e24b0fec0555
return p[0:rootPrefixLen]
}

func stringToRoot(str string) ([32]byte, error) {
slice, err := hexutil.Decode(str)
if err != nil {
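The hunks above change the on-disk layout: a blob directory now lives under a one-byte prefix directory, so "0x0125.../0.ssz" becomes "0x01/0x0125.../0.ssz". A tiny runnable sketch of that path construction, copying the constant from the diff for illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Mirrors the new blobNamer layout: <one-byte prefix>/<full root>/<index>.ssz
const rootPrefixLen = 4 // "0x" plus one byte of the root

func oneBytePrefix(root string) string { return root[:rootPrefixLen] }

func main() {
	root := "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b"
	dir := filepath.Join(oneBytePrefix(root), root)
	fmt.Println(filepath.Join(dir, "0.ssz"))
	// 0x01/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz
}
```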
@@ -170,10 +170,15 @@ func TestBlobStoragePrune(t *testing.T) {
for _, sidecar := range testSidecars {
require.NoError(t, bs.Save(sidecar))
}
namer := namerForSidecar(testSidecars[0])

require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
beforeFolders, err := afero.ReadDir(fs, namer.groupDir())
require.NoError(t, err)
require.Equal(t, 1, len(beforeFolders))

remainingFolders, err := afero.ReadDir(fs, ".")
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize, nil))

remainingFolders, err := afero.ReadDir(fs, namer.groupDir())
require.NoError(t, err)
require.Equal(t, 0, len(remainingFolders))
})
@@ -181,36 +186,51 @@ func TestBlobStoragePrune(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
namer := namerForSidecar(testSidecars[0])

for _, sidecar := range testSidecars[4:] {
require.NoError(t, bs.Save(sidecar))
}

require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize, nil))

remainingFolders, err := afero.ReadDir(fs, ".")
remainingFolders, err := afero.ReadDir(fs, namer.groupDir())
require.NoError(t, err)
require.Equal(t, 0, len(remainingFolders))
})
t.Run("PruneMany", func(t *testing.T) {
blockQty := 10
slot := primitives.Slot(1)

for j := 0; j <= blockQty; j++ {
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, fieldparams.MaxBlobsPerBlock)
pruneBefore := currentSlot - bs.pruner.windowSize
increment := primitives.Slot(10000)
slots := []primitives.Slot{
pruneBefore - increment,
pruneBefore - (2 * increment),
pruneBefore,
pruneBefore + increment,
pruneBefore + (2 * increment),
}
namers := make([]blobNamer, len(slots))
for i, s := range slots {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, s, 1)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
require.NoError(t, bs.Save(testSidecars[0]))

slot += 10000
namers[i] = namerForSidecar(testSidecars[0])
}

require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize, nil))

remainingFolders, err := afero.ReadDir(fs, ".")
require.NoError(t, err)
require.Equal(t, 4, len(remainingFolders))
// first 2 subdirs should be removed
for _, nmr := range namers[0:2] {
entries, err := listDir(fs, nmr.dir())
require.Equal(t, 0, len(entries))
require.ErrorIs(t, err, os.ErrNotExist)
}
// the rest should still be there
for _, nmr := range namers[2:] {
entries, err := listDir(fs, nmr.dir())
require.NoError(t, err)
require.Equal(t, 1, len(entries))
}
})
}

@@ -234,7 +254,7 @@ func BenchmarkPruning(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := bs.pruner.prune(currentSlot)
err := bs.pruner.prune(currentSlot, nil)
require.NoError(b, err)
}
}

beacon-chain/db/filesystem/migration.go (new file, 61 lines)
@@ -0,0 +1,61 @@
package filesystem

import (
"path/filepath"
"time"

"github.com/spf13/afero"
)

type directoryMigrator interface {
migrate(fs afero.Fs, dirs []string) error
}

type oneBytePrefixMigrator struct {
migrated []string
}

func (m *oneBytePrefixMigrator) migrate(fs afero.Fs, dirs []string) error {
start := time.Now()
defer func() {
nMigrated := len(m.migrated)
if nMigrated > 0 {
log.WithField("elapsed", time.Since(start).String()).
WithField("dirsMoved", nMigrated).
Debug("Migrated blob subdirectories to byte-prefixed format")
}
}()
groups := groupDirsByPrefix(dirs)
return m.renameByGroup(fs, groups)
}

func (m *oneBytePrefixMigrator) renameByGroup(fs afero.Fs, groups map[string][]string) error {
for g, sd := range groups {
// make the enclosing dir if needed
if err := fs.MkdirAll(g, directoryPermissions); err != nil {
return err
}
for _, dir := range sd {
dest := filepath.Join(g, dir)
// todo: check if directory exists and move files one at a time if so?
// that shouldn't be a problem if we migrate in cache warmup and never write to old path.
if err := fs.Rename(dir, dest); err != nil {
return err
}
log.WithField("source", dir).WithField("dest", dest).Trace("Migrated legacy blob storage path.")
m.migrated = append(m.migrated, dir)
}
}
return nil
}

func groupDirsByPrefix(dirs []string) map[string][]string {
groups := make(map[string][]string)
for _, dir := range dirs {
if filterLegacy(dir) {
key := oneBytePrefix(dir)
groups[key] = append(groups[key], dir)
}
}
return groups
}
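Roughly, groupDirsByPrefix only buckets legacy, full-length root directories; entries that are already nested under a prefix (or are the prefix directories themselves) are left alone, which is what the test file that follows exercises. A small illustration, assuming package-internal access (e.g. from a test in the same package):

```go
// Inputs mix a legacy directory with an already-migrated path.
dirs := []string{
	"0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b",        // legacy, 66 chars
	"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86",   // already migrated, skipped
}
groups := groupDirsByPrefix(dirs)
// groups == map[string][]string{
// 	"0x01": {"0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b"},
// }
```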
beacon-chain/db/filesystem/migration_test.go (new file, 216 lines)
@@ -0,0 +1,216 @@
package filesystem

import (
"os"
"path/filepath"
"testing"

"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/spf13/afero"
)

func testSetupPaths(t *testing.T, fs afero.Fs, paths []migrateBeforeAfter) {
for _, ba := range paths {
p := ba.before()
dir := filepath.Dir(p)
require.NoError(t, fs.MkdirAll(dir, directoryPermissions))
fh, err := fs.Create(p)
require.NoError(t, err)
require.NoError(t, fh.Close())
// double check that we got the full path correct
_, err = fs.Stat(ba.before())
require.NoError(t, err)
}
}

func testAssertNewPaths(t *testing.T, fs afero.Fs, paths []migrateBeforeAfter) {
for _, ba := range paths {
if ba.before() != ba.after() {
_, err := fs.Stat(ba.before())
require.ErrorIs(t, err, os.ErrNotExist)
dir := filepath.Dir(ba.before())
_, err = listDir(fs, dir)
require.ErrorIs(t, err, os.ErrNotExist)
}
_, err := fs.Stat(ba.after())
require.NoError(t, err)
}
}

type migrateBeforeAfter [2]string

func (ba migrateBeforeAfter) before() string {
return ba[0]
}
func (ba migrateBeforeAfter) after() string {
return ba[1]
}

func TestOneBytePrefixMigrator(t *testing.T) {
cases := []struct {
name string
plan []migrateBeforeAfter
err error
}{
{
name: "happy path",
plan: []migrateBeforeAfter{
{
"0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
"0x01/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
"0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
"0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
"0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
"0x02/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
},
},
{
name: "different roots same prefix",
plan: []migrateBeforeAfter{
{
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
},
{
"0xff0774a80664e1667dcd5a18bced866a596b6cef5f351c0f88cd310dd00cb16d/0.ssz",
"0xff/0xff0774a80664e1667dcd5a18bced866a596b6cef5f351c0f88cd310dd00cb16d/0.ssz",
},
{
"0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
"0x01/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
"0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
},
},
{
name: "mix old and new",
plan: []migrateBeforeAfter{
{
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
},
{
"0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
"0x01/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
"0xa0/0xa0000137a809ca8425e03ae6c4244eedc7c0aa37f2735883366bcaf1cca1e3f3/0.ssz",
"0xa0/0xa0000137a809ca8425e03ae6c4244eedc7c0aa37f2735883366bcaf1cca1e3f3/0.ssz",
},
{
"0xa0/0xa0000137a809ca8425e03ae6c4244eedc7c0aa37f2735883366bcaf1cca1e3f3/1.ssz",
"0xa0/0xa0000137a809ca8425e03ae6c4244eedc7c0aa37f2735883366bcaf1cca1e3f3/1.ssz",
},
{
"0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
"0x01/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
},
},
{
name: "overwrite existing root dir",
plan: []migrateBeforeAfter{
{
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
},
{
"0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/1.ssz",
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb/0.ssz",
},
},
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
fs, _ := NewEphemeralBlobStorageWithFs(t)
testSetupPaths(t, fs, c.plan)
entries, err := listDir(fs, ".")
require.NoError(t, err)
m := &oneBytePrefixMigrator{}
err = m.migrate(fs, entries)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
testAssertNewPaths(t, fs, c.plan)
})
}
}

func TestGroupDirsByPrefix(t *testing.T) {
cases := []struct {
name string
dirs []string
groups map[string][]string
}{
{
name: "different buckets",
dirs: []string{
"0x00ff0b18f16d3f22e6386ec3d6718346358089be531cb3715cb61b34a08aca04",
"0x0105400af093eeca95c1bf3874e97ec433244dd45222d850fe5ee50e53385f05",
},
groups: map[string][]string{
"0x00": {"0x00ff0b18f16d3f22e6386ec3d6718346358089be531cb3715cb61b34a08aca04"},
"0x01": {"0x0105400af093eeca95c1bf3874e97ec433244dd45222d850fe5ee50e53385f05"},
},
},
{
name: "same prefix, one bucket",
dirs: []string{
"0xfff5b975edfa1fbf807afb96e512bfa91eb41f78a9c9999d17f451d0077d3ed8",
"0xffff0f4efdd596f39c602c7758d73b7ecf66856fd7649321f78fc8356a2e98b1",
"0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
},
groups: map[string][]string{
"0xff": {
"0xfff5b975edfa1fbf807afb96e512bfa91eb41f78a9c9999d17f451d0077d3ed8",
"0xffff0f4efdd596f39c602c7758d73b7ecf66856fd7649321f78fc8356a2e98b1",
"0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
},
},
},
{
name: "mix of legacy and new",
dirs: []string{
"0xfff5b975edfa1fbf807afb96e512bfa91eb41f78a9c9999d17f451d0077d3ed8",
"0xff/0xffff0f4efdd596f39c602c7758d73b7ecf66856fd7649321f78fc8356a2e98b1",
"0xff/0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
},
groups: map[string][]string{
"0xff": {"0xfff5b975edfa1fbf807afb96e512bfa91eb41f78a9c9999d17f451d0077d3ed8"},
},
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
groups := groupDirsByPrefix(c.dirs)
require.Equal(t, len(c.groups), len(groups))
for k, v := range c.groups {
got := groups[k]
require.Equal(t, len(v), len(got))
// compare the lists
require.DeepEqual(t, v, got)
}
})
}
}
@@ -28,6 +28,10 @@ var (
errNotBlobSSZ = errors.New("not a blob ssz file")
)

// Full root in directory will be 66 chars, eg:
// >>> len('0x0002fb4db510b8618b04dc82d023793739c26346a8b02eb73482e24b0fec0555') == 66
const legacyRootLen = 66

type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
@@ -74,7 +78,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
go func() {
p.Lock()
defer p.Unlock()
if err := p.prune(primitives.Slot(pruned)); err != nil {
if err := p.prune(primitives.Slot(pruned), nil); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
}()
@@ -93,7 +97,7 @@ func windowMin(latest, offset primitives.Slot) primitives.Slot {
func (p *blobPruner) warmCache() error {
p.Lock()
defer p.Unlock()
if err := p.prune(0); err != nil {
if err := p.prune(0, &oneBytePrefixMigrator{}); err != nil {
return err
}
if !p.warmed {
@@ -115,7 +119,7 @@ func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
func (p *blobPruner) prune(pruneBefore primitives.Slot, m directoryMigrator) error {
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
@@ -135,12 +139,11 @@ func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
}()
}

entries, err := listDir(p.fs, ".")
entries, err := walkAndMigrateBasedir(p.fs, m)
if err != nil {
return errors.Wrap(err, "unable to list root blobs directory")
}
dirs := filter(entries, filterRoot)
for _, dir := range dirs {
for _, dir := range entries {
pruned, err := p.tryPruneDir(dir, pruneBefore)
if err != nil {
totalErr += 1

@@ -274,6 +277,40 @@ func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
return primitives.Slot(rawSlot), nil
}

// walkAndMigrateBasedir manages executing any needed directory migrations while also returning a list of every
// individual root directory containing blob files.
func walkAndMigrateBasedir(fs afero.Fs, m directoryMigrator) ([]string, error) {
listing := make([]string, 0)
topDirs, err := listDir(fs, ".")
if err != nil {
return nil, err
}
if m != nil {
if err := m.migrate(fs, topDirs); err != nil {
return nil, err
}
}
// list all the subdirs to get the full listing.
for i := range topDirs {
dir := topDirs[i]
// We're not worried about any dangling legacy format paths because migrator should have already done its job.
if !filterRootGroupDir(dir) {
continue
}
subdirs, err := listDir(fs, dir)
if err != nil {
return nil, err
}
for _, sd := range subdirs {
if !filterRoot(sd) {
continue
}
listing = append(listing, filepath.Join(dir, sd))
}
}
return listing, nil
}

func listDir(fs afero.Fs, dir string) ([]string, error) {
top, err := fs.Open(dir)
if err != nil {
@@ -306,6 +343,14 @@ func filterRoot(s string) bool {
return strings.HasPrefix(s, "0x")
}

func filterLegacy(s string) bool {
return filterRoot(s) && len(s) == legacyRootLen
}

func filterRootGroupDir(s string) bool {
return filterRoot(s) && len(filepath.Base(s)) == rootPrefixLen
}

var dotSszExt = "." + sszExt
var dotPartExt = "." + partExt

@@ -43,10 +43,11 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
rootStr := rootString(sc.BlockRoot())
require.NoError(t, fs.Mkdir(rootStr, directoryPermissions)) // make empty directory
namer := namerForSidecar(sc)
dir := namer.dir()
require.NoError(t, fs.Mkdir(dir, directoryPermissions)) // make empty directory
require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
pruned, err := pr.tryPruneDir(rootStr, slot+1)
pruned, err := pr.tryPruneDir(dir, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
})
@@ -61,21 +62,21 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
require.NoError(t, bs.Save(scs[1]))

// check that the root->slot is cached
root := scs[0].BlockRoot()
rootStr := rootString(root)
namer := namerForSidecar(scs[0])
dir := namer.dir()
cs, cok := bs.pruner.cache.slot(scs[0].BlockRoot())
require.Equal(t, true, cok)
require.Equal(t, slot, cs)

// ensure that we see the saved files in the filesystem
files, err := listDir(fs, rootStr)
files, err := listDir(fs, dir)
require.NoError(t, err)
require.Equal(t, 2, len(files))

pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
pruned, err := bs.pruner.tryPruneDir(dir, slot+1)
require.NoError(t, err)
require.Equal(t, 2, pruned)
files, err = listDir(fs, rootStr)
files, err = listDir(fs, dir)
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, 0, len(files))
})
@@ -94,7 +95,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {

// check that the root->slot is cached
root := scs[0].BlockRoot()
rootStr := rootString(root)
namer := namerForSidecar(scs[0])
dir := namer.dir()
cs, ok := bs.pruner.cache.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
@@ -104,14 +106,14 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
require.Equal(t, false, ok)

// ensure that we see the saved files in the filesystem
files, err := listDir(fs, rootStr)
files, err := listDir(fs, dir)
require.NoError(t, err)
require.Equal(t, 2, len(files))

pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
pruned, err := bs.pruner.tryPruneDir(dir, slot+1)
require.NoError(t, err)
require.Equal(t, 2, pruned)
files, err = listDir(fs, rootStr)
files, err = listDir(fs, dir)
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, 0, len(files))
})
@@ -128,24 +130,25 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {

// Evict slot mapping from the cache so that we trigger the file read path.
root := scs[0].BlockRoot()
rootStr := rootString(root)
namer := namerForSidecar(scs[0])
dir := namer.dir()
bs.pruner.cache.evict(root)
_, ok := bs.pruner.cache.slot(root)
require.Equal(t, false, ok)

// Ensure that we see the saved files in the filesystem.
files, err := listDir(fs, rootStr)
files, err := listDir(fs, dir)
require.NoError(t, err)
require.Equal(t, 2, len(files))

// This should use the slotFromFile code (simulating restart).
// Setting pruneBefore == slot, so that the slot will be outside the window (at the boundary).
pruned, err := bs.pruner.tryPruneDir(rootStr, slot)
pruned, err := bs.pruner.tryPruneDir(dir, slot)
require.NoError(t, err)
require.Equal(t, 0, pruned)

// Ensure files are still present.
files, err = listDir(fs, rootStr)
files, err = listDir(fs, dir)
require.NoError(t, err)
require.Equal(t, 2, len(files))
})

@@ -115,6 +115,7 @@ go_test(
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",

@@ -813,6 +813,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
}
case hasElectraKey(enc):
rawBlock = &ethpb.SignedBeaconBlockElectra{}
if err := rawBlock.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Electra block")
}
case hasElectraBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockElectra{}
if err := rawBlock.UnmarshalSSZ(enc[len(electraBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Electra block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -842,6 +852,11 @@ func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {

func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
switch blk.Version() {
case version.Electra:
if blk.IsBlinded() {
return electraBlindKey, nil
}
return electraKey, nil
case version.Deneb:
if blk.IsBlinded() {
return denebBlindKey, nil
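The unmarshalBlock/keyForBlock hunks above extend a simple storage convention: each block is persisted as a fork-specific key prefix followed by the SSZ bytes, and decoding dispatches on that prefix. A generic sketch of the pattern; the key values and helper names here are illustrative, not the actual database schema constants:

```go
// Version-prefixed encoding: <fork key> || <SSZ payload>.
var (
	electraKey      = []byte("electra")       // illustrative prefix
	electraBlindKey = []byte("blind-electra") // illustrative prefix
)

// hasKey reports whether enc starts with the given fork key.
func hasKey(enc, key []byte) bool {
	return len(enc) >= len(key) && string(enc[:len(key)]) == string(key)
}

// encodeWithKey prepends the fork key to the SSZ-encoded block bytes.
func encodeWithKey(key, ssz []byte) []byte {
	out := make([]byte, 0, len(key)+len(ssz))
	return append(append(out, key...), ssz...)
}

// payload strips the fork key, leaving the SSZ bytes to unmarshal.
func payload(enc, key []byte) []byte { return enc[len(key):] }
```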
@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -124,6 +125,47 @@ var blockTests = []struct {
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "electra",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockElectra()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.Consolidations = []*ethpb.SignedConsolidation{
{
Message: &ethpb.Consolidation{
SourceIndex: 1,
TargetIndex: 2,
Epoch: 3,
},
Signature: make([]byte, 96),
},
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "electra blind",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBlindedBeaconBlockElectra()
b.Message.Slot = slot
if root != nil {
b.Message.ParentRoot = root
b.Message.Body.Consolidations = []*ethpb.SignedConsolidation{
{
Message: &ethpb.Consolidation{
SourceIndex: 1,
TargetIndex: 2,
Epoch: 3,
},
Signature: make([]byte, 96),
},
}
}
return blocks.NewSignedBeaconBlock(b)
}},
}

func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
@@ -180,7 +222,7 @@ func TestStore_BlocksCRUD(t *testing.T) {
retrievedBlock, err = db.Block(ctx, blockRoot)
require.NoError(t, err)
wanted := retrievedBlock
if _, err := retrievedBlock.PbBellatrixBlock(); err == nil {
if retrievedBlock.Version() >= version.Bellatrix {
wanted, err = retrievedBlock.ToBlinded()
require.NoError(t, err)
}
@@ -368,15 +410,7 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
require.NoError(t, err)

wanted := blk
if _, err := blk.PbBellatrixBlock(); err == nil {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
if _, err := blk.PbCapellaBlock(); err == nil {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
if _, err := blk.PbDenebBlock(); err == nil {
if blk.Version() >= version.Bellatrix {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
@@ -595,15 +629,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
b, err := db.Block(ctx, root)
require.NoError(t, err)
wanted := block1
if _, err := block1.PbBellatrixBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbCapellaBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
if block1.Version() >= version.Bellatrix {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
@@ -621,15 +647,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
b, err = db.Block(ctx, root)
require.NoError(t, err)
wanted2 := block2
if _, err := block2.PbBellatrixBlock(); err == nil {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
if _, err := block2.PbCapellaBlock(); err == nil {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
if _, err := block2.PbDenebBlock(); err == nil {
if block2.Version() >= version.Bellatrix {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
@@ -647,15 +665,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
b, err = db.Block(ctx, root)
require.NoError(t, err)
wanted = block3
if _, err := block3.PbBellatrixBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block3.PbCapellaBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block3.PbDenebBlock(); err == nil {
if block3.Version() >= version.Bellatrix {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
@@ -691,15 +701,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted := block1
|
||||
if _, err := block1.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := block1.PbCapellaBlock(); err == nil {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := block1.PbDenebBlock(); err == nil {
|
||||
if block1.Version() >= version.Bellatrix {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -716,15 +718,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted = genesisBlock
|
||||
if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := genesisBlock.PbCapellaBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := genesisBlock.PbDenebBlock(); err == nil {
|
||||
if genesisBlock.Version() >= version.Bellatrix {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -741,15 +735,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted = genesisBlock
|
||||
if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := genesisBlock.PbCapellaBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := genesisBlock.PbDenebBlock(); err == nil {
|
||||
if genesisBlock.Version() >= version.Bellatrix {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -845,15 +831,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
require.NoError(t, err)
wanted := b1
|
||||
if _, err := b1.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b1.PbCapellaBlock(); err == nil {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b1.PbDenebBlock(); err == nil {
|
||||
if b1.Version() >= version.Bellatrix {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -869,15 +847,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
|
||||
}
|
||||
wanted = b2
|
||||
if _, err := b2.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b2.PbCapellaBlock(); err == nil {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b2.PbDenebBlock(); err == nil {
|
||||
if b2.Version() >= version.Bellatrix {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -887,15 +857,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(wantedPb, retrieved0Pb), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
|
||||
wanted = b3
|
||||
if _, err := b3.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b3.PbCapellaBlock(); err == nil {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b3.PbDenebBlock(); err == nil {
|
||||
if b3.Version() >= version.Bellatrix {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
@@ -51,3 +51,17 @@ func hasDenebBlindKey(enc []byte) bool {
|
||||
}
|
||||
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
|
||||
}
func hasElectraKey(enc []byte) bool {
|
||||
if len(electraKey) >= len(enc) {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(enc[:len(electraKey)], electraKey)
|
||||
}
func hasElectraBlindKey(enc []byte) bool {
|
||||
if len(electraBlindKey) >= len(enc) {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(enc[:len(electraBlindKey)], electraBlindKey)
|
||||
}
@@ -48,6 +48,8 @@ var (
|
||||
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
|
||||
denebKey = []byte("deneb")
|
||||
denebBlindKey = []byte("blind-deneb")
|
||||
electraKey = []byte("electra")
|
||||
electraBlindKey = []byte("blind-electra")
// block root included in the beacon state used by weak subjectivity initial sync
|
||||
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
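
The `electraKey`/`electraBlindKey` prefixes above follow the store's existing convention: SSZ-marshal the object, prepend a short fork tag, snappy-compress the result, and let the `has*Key` checks route the read path to the right unmarshaller. A minimal standalone sketch of that convention follows; `encodeWithPrefix` and `hasPrefix` are illustrative names, not the exact Prysm helpers.

```go
// Sketch of the fork-prefix + snappy convention, assuming github.com/golang/snappy.
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var electraKey = []byte("electra")

// encodeWithPrefix prepends the fork key before compressing the raw SSZ bytes.
func encodeWithPrefix(prefix, rawSSZ []byte) []byte {
	return snappy.Encode(nil, append(prefix, rawSSZ...))
}

// hasPrefix mirrors the hasElectraKey-style checks: the prefix must be
// strictly shorter than the encoding and match its leading bytes.
func hasPrefix(enc, prefix []byte) bool {
	if len(prefix) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(prefix)], prefix)
}

func main() {
	enc := encodeWithPrefix(electraKey, []byte{0x01, 0x02})
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	// The SSZ payload would be dec[len(electraKey):].
	fmt.Println(hasPrefix(dec, electraKey)) // true
}
```
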
@@ -229,160 +229,142 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].ToProtoUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
pbState, err := getPhase0PbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
if err := s.processPhase0(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
pbState, err := getAltairPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
if err := s.processAltair(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
pbState, err := getBellatrixPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
if err := s.processBellatrix(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateCapella:
|
||||
pbState, err := getCapellaPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
if err := s.processCapella(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateDeneb:
|
||||
pbState, err := getDenebPbState(rawType)
|
||||
if err != nil {
|
||||
if err := s.processDeneb(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(denebKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
case *ethpb.BeaconStateElectra:
|
||||
if err := s.processElectra(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
}
|
||||
// store the validator entries separately to save space.
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}
func getPhase0PbState(rawState interface{}) (*ethpb.BeaconState, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(rawState)
|
||||
func (s *Store) processPhase0(ctx context.Context, pbState *ethpb.BeaconState, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
pbState.Validators = valEntries
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
return pbState, nil
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
func getAltairPbState(rawState interface{}) (*ethpb.BeaconStateAltair, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateAltair(rawState)
|
||||
func (s *Store) processAltair(ctx context.Context, pbState *ethpb.BeaconStateAltair, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
return pbState, nil
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
func getBellatrixPbState(rawState interface{}) (*ethpb.BeaconStateBellatrix, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawState)
|
||||
func (s *Store) processBellatrix(ctx context.Context, pbState *ethpb.BeaconStateBellatrix, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
return pbState, nil
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
func getCapellaPbState(rawState interface{}) (*ethpb.BeaconStateCapella, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateCapella(rawState)
|
||||
func (s *Store) processCapella(ctx context.Context, pbState *ethpb.BeaconStateCapella, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
return pbState, nil
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
func getDenebPbState(rawState interface{}) (*ethpb.BeaconStateDeneb, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateDeneb(rawState)
|
||||
func (s *Store) processDeneb(ctx context.Context, pbState *ethpb.BeaconStateDeneb, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
encodedState := snappy.Encode(nil, append(denebKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
return pbState, nil
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
func (s *Store) processElectra(ctx context.Context, pbState *ethpb.BeaconStateElectra, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(electraKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
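
The refactor above keeps the promise of the "store the validator entries separately to save space" comment: each state persists only the hashes of its validator entries, and the entries themselves are deduplicated in a shared bucket. A toy in-memory sketch of that idea is below; sha256 over a byte blob stands in for the real SSZ hash-tree-root, and the types are illustrative rather than Prysm's.

```go
// Minimal sketch of deduplicated validator-entry storage, under the
// assumption that a content hash identifies each entry.
package main

import (
	"crypto/sha256"
	"fmt"
)

type validatorStore struct {
	entries map[[32]byte][]byte   // shared, deduplicated validator entries
	byState map[string][][32]byte // per-state index of entry hashes
}

func newValidatorStore() *validatorStore {
	return &validatorStore{
		entries: make(map[[32]byte][]byte),
		byState: make(map[string][][32]byte),
	}
}

// saveState records the hashes for a state root and writes each entry only once.
func (s *validatorStore) saveState(stateRoot string, validators [][]byte) {
	hashes := make([][32]byte, 0, len(validators))
	for _, v := range validators {
		h := sha256.Sum256(v)
		if _, ok := s.entries[h]; !ok {
			s.entries[h] = v
		}
		hashes = append(hashes, h)
	}
	s.byState[stateRoot] = hashes
}

func main() {
	s := newValidatorStore()
	vals := [][]byte{[]byte("val-0"), []byte("val-1")}
	s.saveState("state-A", vals)
	s.saveState("state-B", vals) // same validators, no extra entry copies
	fmt.Println(len(s.entries))  // 2, not 4
}
```
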
@@ -534,6 +516,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
}
switch {
|
||||
case hasElectraKey(enc):
|
||||
protoState := &ethpb.BeaconStateElectra{}
if err := protoState.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
protoState.Validators = validatorEntries
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeElectra(protoState)
|
||||
case hasDenebKey(enc):
|
||||
protoState := ðpb.BeaconStateDeneb{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
|
||||
@@ -667,6 +662,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
|
||||
case *ethpb.BeaconStateElectra:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
|
||||
if rState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
rawObj, err := rState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(electraKey, rawObj...)), nil
|
||||
default:
|
||||
return nil, errors.New("invalid inner state")
|
||||
}
@@ -132,6 +132,33 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
},
|
||||
rootSeed: 'D',
|
||||
},
|
||||
{
|
||||
name: "electra",
|
||||
s: func() state.BeaconState {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
p, err := blocks.WrappedExecutionPayloadHeaderElectra(&enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
ExtraData: []byte("foo"),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
DepositReceiptsRoot: make([]byte, 32),
|
||||
WithdrawalRequestsRoot: make([]byte, 32),
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
return st
|
||||
},
|
||||
rootSeed: 'E',
|
||||
},
|
||||
}
db := setupDB(t)
|
||||
@@ -1138,6 +1165,100 @@ func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
func TestElectraState_CanSaveRetrieve(t *testing.T) {
|
||||
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
|
||||
require.Equal(t, true, db.HasState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
assert.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())
savedS, err = db.State(context.Background(), [32]byte{'B'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
func TestElectraState_CanDelete(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
r := [32]byte{'A'}
|
||||
|
||||
require.Equal(t, false, db.HasState(context.Background(), r))
|
||||
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
|
||||
require.NoError(t, db.SaveState(context.Background(), st, r))
|
||||
require.Equal(t, true, db.HasState(context.Background(), r))
|
||||
|
||||
require.NoError(t, db.DeleteState(context.Background(), r))
|
||||
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
|
||||
|
||||
func TestStateElectra_CanSaveRetrieveValidatorEntries(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// enable historical state representation flag to test this
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableHistoricalSpaceRepresentation: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
r := [32]byte{'A'}
|
||||
|
||||
require.Equal(t, false, db.HasState(context.Background(), r))
|
||||
|
||||
stateValidators := validators(10)
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 20)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
require.NoError(t, st.SetValidators(stateValidators))
|
||||
|
||||
ctx := context.Background()
|
||||
require.NoError(t, db.SaveState(ctx, st, r))
|
||||
assert.Equal(t, true, db.HasState(context.Background(), r))
|
||||
|
||||
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching")
|
||||
|
||||
// check if the index of the second state is still present.
|
||||
err = db.db.Update(func(tx *bolt.Tx) error {
|
||||
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
data := idxBkt.Get(r[:])
|
||||
require.NotEqual(t, 0, len(data))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if all the validator entries are still intact in the validator entry bucket.
|
||||
err = db.db.Update(func(tx *bolt.Tx) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
// if any of the original validator entry is not present, then fail the test.
|
||||
for _, val := range stateValidators {
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
assert.NoError(t, hashErr)
|
||||
data := valBkt.Get(hash[:])
|
||||
require.NotNil(t, data)
|
||||
require.NotEqual(t, 0, len(data))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func BenchmarkState_CheckStateSaveTime_1(b *testing.B) { checkStateSaveTime(b, 1) }
|
||||
func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b, 10) }
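
The slasher changes below replace direct field access (`att.IndexedAttestation.Data`, `.AttestingIndices`) with getters, which lets one code path handle attestation versions before and after Electra. A small illustrative sketch of that pattern follows; the interface and struct names are stand-ins, not Prysm's generated protobuf types.

```go
// Sketch of version-agnostic attestation access through a getter interface.
package main

import "fmt"

type AttestationData struct {
	TargetEpoch uint64
}

// IndexedAtt is an assumed interface shape: any attestation version that
// exposes these getters can be processed by the same slasher routine.
type IndexedAtt interface {
	GetData() *AttestationData
	GetAttestingIndices() []uint64
}

type phase0IndexedAtt struct {
	data    *AttestationData
	indices []uint64
}

func (a *phase0IndexedAtt) GetData() *AttestationData     { return a.data }
func (a *phase0IndexedAtt) GetAttestingIndices() []uint64 { return a.indices }

// targetEpoch works for any attestation version that satisfies IndexedAtt.
func targetEpoch(att IndexedAtt) uint64 {
	return att.GetData().TargetEpoch
}

func main() {
	att := &phase0IndexedAtt{data: &AttestationData{TargetEpoch: 7}, indices: []uint64{1, 2}}
	fmt.Println(targetEpoch(att), att.GetAttestingIndices())
}
```
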
@@ -156,10 +156,10 @@ func (s *Store) CheckAttesterDoubleVotes(
|
||||
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
|
||||
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
|
||||
|
||||
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
|
||||
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.GetData().Target.Epoch)
|
||||
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
|
||||
|
||||
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
|
||||
for _, valIdx := range attToProcess.IndexedAttestation.GetAttestingIndices() {
|
||||
// Check if there is signing root in the database for this combination
|
||||
// of validator index and target epoch.
|
||||
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
|
||||
@@ -194,7 +194,7 @@ func (s *Store) CheckAttesterDoubleVotes(
|
||||
// Build the proof of double vote.
|
||||
slashAtt := &slashertypes.AttesterDoubleVote{
|
||||
ValidatorIndex: primitives.ValidatorIndex(valIdx),
|
||||
Target: attToProcess.IndexedAttestation.Data.Target.Epoch,
|
||||
Target: attToProcess.IndexedAttestation.GetData().Target.Epoch,
|
||||
Wrapper_1: existingAttRecord,
|
||||
Wrapper_2: attToProcess,
|
||||
}
|
||||
@@ -280,7 +280,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
|
||||
encodedRecords := make([][]byte, attWrappersCount)
|
||||
|
||||
for i, attestation := range attWrappers {
|
||||
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)
|
||||
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.GetData().Target.Epoch)
|
||||
|
||||
value, err := encodeAttestationRecord(attestation)
|
||||
if err != nil {
|
||||
@@ -325,7 +325,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
|
||||
return err
|
||||
}
|
||||
|
||||
for _, validatorIndex := range attWrapper.IndexedAttestation.AttestingIndices {
|
||||
for _, validatorIndex := range attWrapper.IndexedAttestation.GetAttestingIndices() {
|
||||
encodedIndex := encodeValidatorIndex(primitives.ValidatorIndex(validatorIndex))
|
||||
|
||||
key := append(encodedTargetEpoch, encodedIndex...)
|
||||
@@ -638,8 +638,8 @@ func (s *Store) HighestAttestations(
|
||||
}
|
||||
highestAtt := &ethpb.HighestAttestation{
ValidatorIndex: uint64(indices[i]),
|
||||
HighestSourceEpoch: attWrapper.IndexedAttestation.Data.Source.Epoch,
|
||||
HighestTargetEpoch: attWrapper.IndexedAttestation.Data.Target.Epoch,
|
||||
HighestSourceEpoch: attWrapper.IndexedAttestation.GetData().Source.Epoch,
|
||||
HighestTargetEpoch: attWrapper.IndexedAttestation.GetData().Target.Epoch,
|
||||
}
|
||||
history = append(history, highestAtt)
|
||||
break
|
||||
|
||||
@@ -62,7 +62,7 @@ func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
|
||||
actual, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected.IndexedAttestation.Data.Source.Epoch, actual.IndexedAttestation.Data.Source.Epoch)
|
||||
require.DeepEqual(t, expected.IndexedAttestation.GetData().Source.Epoch, actual.IndexedAttestation.GetData().Source.Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -544,7 +544,7 @@ func BenchmarkHighestAttestations(b *testing.B) {
|
||||
for i := 0; i < count; i++ {
|
||||
indicesForAtt := make([]primitives.ValidatorIndex, valsPerAtt)
|
||||
for r := 0; r < valsPerAtt; r++ {
|
||||
indicesForAtt[r] = primitives.ValidatorIndex(atts[i].IndexedAttestation.AttestingIndices[r])
|
||||
indicesForAtt[r] = primitives.ValidatorIndex(atts[i].IndexedAttestation.GetAttestingIndices()[r])
|
||||
}
|
||||
allIndices = append(allIndices, indicesForAtt...)
|
||||
}
|
||||
|
||||
@@ -49,6 +49,8 @@ const (
|
||||
// NewPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
NewPayloadMethodV2 = "engine_newPayloadV2"
|
||||
NewPayloadMethodV3 = "engine_newPayloadV3"
|
||||
// NewPayloadMethodV4 is the engine_newPayloadVX method added at Electra.
|
||||
NewPayloadMethodV4 = "engine_newPayloadV4"
|
||||
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
|
||||
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
|
||||
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
|
||||
@@ -59,7 +61,10 @@ const (
|
||||
GetPayloadMethod = "engine_getPayloadV1"
|
||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||
// GetPayloadMethodV3 is the get payload method added for deneb
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// GetPayloadMethodV4 is the get payload method added for electra
|
||||
GetPayloadMethodV4 = "engine_getPayloadV4"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
@@ -82,7 +87,7 @@ type ForkchoiceUpdatedResponse struct {
|
||||
ValidationError string `json:"validationError"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadReconstructor defines a service that can reconstruct a full beacon
|
||||
// PayloadReconstructor defines a service that can reconstruct a full beacon
|
||||
// block with an execution payload from a signed beacon block and a connection
|
||||
// to an execution client's engine API.
|
||||
type PayloadReconstructor interface {
|
||||
@@ -150,6 +155,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
if err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
case *pb.ExecutionPayloadElectra:
|
||||
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("execution data must be an Electra execution payload")
}
|
||||
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV4, payloadPb, versionedHashes, parentBlockRoot)
|
||||
if err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("unknown execution data type")
|
||||
}
|
||||
@@ -208,7 +222,7 @@ func (s *Service) ForkchoiceUpdated(
|
||||
if err != nil {
|
||||
return nil, nil, handleRPCError(err)
|
||||
}
|
||||
case version.Deneb:
|
||||
case version.Deneb, version.Electra:
|
||||
a, err := attrs.PbV3()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -254,6 +268,19 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch {
|
||||
result := &pb.ExecutionPayloadElectraWithValueAndBlobsBundle{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV4, pb.PayloadIDBytes(payloadId))
|
||||
if err != nil {
|
||||
return nil, nil, false, handleRPCError(err)
|
||||
}
|
||||
ed, err := blocks.WrappedExecutionPayloadElectra(result.Payload, blocks.PayloadValueToWei(result.Value))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, result.BlobsBundle, result.ShouldOverrideBuilder, nil
|
||||
}
|
||||
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
|
||||
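
`GetPayload` above now tries the Electra path first and falls back to Deneb, with the slot's epoch driving which `engine_getPayload` version is called. A hedged sketch of that selection logic is below; only the Electra and Deneb branches appear in the diff, so the V2/V1 fallbacks are assumed from the method constants, and the fork epochs are plain arguments where Prysm reads them from its beacon config.

```go
// Sketch of epoch-based dispatch to the engine_getPayload method family.
package main

import "fmt"

const (
	GetPayloadMethod   = "engine_getPayloadV1"
	GetPayloadMethodV2 = "engine_getPayloadV2"
	GetPayloadMethodV3 = "engine_getPayloadV3"
	GetPayloadMethodV4 = "engine_getPayloadV4"
)

// getPayloadMethodForEpoch checks the newest fork first, mirroring the shape
// of the branches in GetPayload.
func getPayloadMethodForEpoch(epoch, capellaEpoch, denebEpoch, electraEpoch uint64) string {
	switch {
	case epoch >= electraEpoch:
		return GetPayloadMethodV4
	case epoch >= denebEpoch:
		return GetPayloadMethodV3
	case epoch >= capellaEpoch:
		return GetPayloadMethodV2
	default:
		return GetPayloadMethod
	}
}

func main() {
	// Example epochs: Capella at 1, Deneb and Electra at 2, as in the HTTP test config.
	fmt.Println(getPayloadMethodForEpoch(0, 1, 2, 2)) // engine_getPayloadV1
	fmt.Println(getPayloadMethodForEpoch(2, 1, 2, 2)) // engine_getPayloadV4
}
```
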
@@ -682,100 +709,6 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
|
||||
return fullBlocks, nil
|
||||
}
|
||||
|
||||
func fullPayloadFromExecutionBlock(
|
||||
blockVersion int, header interfaces.ExecutionData, block *pb.ExecutionBlock,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
if header.IsNil() || block == nil {
|
||||
return nil, errors.New("execution block and header cannot be nil")
|
||||
}
|
||||
blockHash := block.Hash
|
||||
if !bytes.Equal(header.BlockHash(), blockHash[:]) {
|
||||
return nil, fmt.Errorf(
|
||||
"block hash field in execution header %#x does not match execution block hash %#x",
|
||||
header.BlockHash(),
|
||||
blockHash,
|
||||
)
|
||||
}
|
||||
blockTransactions := block.Transactions
|
||||
txs := make([][]byte, len(blockTransactions))
|
||||
for i, tx := range blockTransactions {
|
||||
txBin, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txs[i] = txBin
|
||||
}
|
||||
|
||||
switch blockVersion {
|
||||
case version.Bellatrix:
|
||||
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: blockHash[:],
|
||||
Transactions: txs,
|
||||
})
|
||||
case version.Capella:
|
||||
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: blockHash[:],
|
||||
Transactions: txs,
|
||||
Withdrawals: block.Withdrawals,
|
||||
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
|
||||
case version.Deneb:
|
||||
ebg, err := header.ExcessBlobGas()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from execution payload header")
|
||||
}
|
||||
bgu, err := header.BlobGasUsed()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from execution payload header")
|
||||
}
|
||||
return blocks.WrappedExecutionPayloadDeneb(
|
||||
&pb.ExecutionPayloadDeneb{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: blockHash[:],
|
||||
Transactions: txs,
|
||||
Withdrawals: block.Withdrawals,
|
||||
BlobGasUsed: bgu,
|
||||
ExcessBlobGas: ebg,
|
||||
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
|
||||
}
|
||||
}
|
||||
|
||||
func fullPayloadFromPayloadBody(
|
||||
header interfaces.ExecutionData, body *pb.ExecutionPayloadBodyV1, bVersion int,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
@@ -848,6 +781,35 @@ func fullPayloadFromPayloadBody(
|
||||
ExcessBlobGas: ebg,
|
||||
BlobGasUsed: bgu,
|
||||
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
|
||||
case version.Electra:
|
||||
ebg, err := header.ExcessBlobGas()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from execution payload header")
|
||||
}
|
||||
bgu, err := header.BlobGasUsed()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from execution payload header")
|
||||
}
|
||||
return blocks.WrappedExecutionPayloadElectra(
|
||||
&pb.ExecutionPayloadElectra{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: header.BlockHash(),
|
||||
Transactions: body.Transactions,
|
||||
Withdrawals: body.Withdrawals,
|
||||
ExcessBlobGas: ebg,
|
||||
BlobGasUsed: bgu,
|
||||
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
|
||||
}
|
||||
|
||||
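
The Electra case added to `fullPayloadFromPayloadBody` above repeats the per-fork pattern: copy the blinded header's fields into a full payload and attach the transactions and withdrawals returned by the execution client. A stripped-down sketch of that reconstruction follows, using simplified structs with only a couple of fields rather than the protobuf types.

```go
// Sketch of rebuilding a full payload from a blinded header plus a payload body.
package main

import "fmt"

type Header struct {
	ParentHash []byte
	BlockHash  []byte
}

type Body struct {
	Transactions [][]byte
	Withdrawals  [][]byte
}

type FullPayload struct {
	ParentHash   []byte
	BlockHash    []byte
	Transactions [][]byte
	Withdrawals  [][]byte
}

// fullFromBody combines a blinded header with the body fetched from the EL.
func fullFromBody(h Header, b Body) FullPayload {
	return FullPayload{
		ParentHash:   h.ParentHash,
		BlockHash:    h.BlockHash,
		Transactions: b.Transactions,
		Withdrawals:  b.Withdrawals,
	}
}

func main() {
	p := fullFromBody(Header{BlockHash: []byte{0xaa}}, Body{Transactions: [][]byte{{0x01}}})
	fmt.Println(len(p.Transactions), p.BlockHash)
}
```
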
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -16,7 +17,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
gethRPC "github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
@@ -45,7 +45,7 @@ type RPCClientBad struct {
|
||||
}
|
||||
|
||||
func (RPCClientBad) Close() {}
|
||||
func (RPCClientBad) BatchCall([]gethRPC.BatchElem) error {
|
||||
func (RPCClientBad) BatchCall([]rpc.BatchElem) error {
|
||||
return errors.New("rpc client is not initialized")
|
||||
}
|
||||
|
||||
@@ -76,7 +76,9 @@ func TestClient_IPC(t *testing.T) {
|
||||
resp, _, override, err := srv.GetPayload(ctx, payloadId, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, override)
|
||||
resPb, err := resp.PbBellatrix()
|
||||
pbs := resp.Proto()
|
||||
resPb, ok := pbs.(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resPb)
|
||||
})
|
||||
@@ -87,8 +89,9 @@ func TestClient_IPC(t *testing.T) {
|
||||
resp, _, override, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, override)
|
||||
resPb, err := resp.PbCapella()
|
||||
require.NoError(t, err)
|
||||
pbs := resp.Proto()
|
||||
resPb, ok := pbs.(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, want, resPb)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
|
||||
@@ -158,6 +161,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
@@ -201,9 +205,10 @@ func TestClient_HTTP(t *testing.T) {
|
||||
resp, _, override, err := client.GetPayload(ctx, payloadId, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, override)
|
||||
pb, err := resp.PbBellatrix()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, pb)
|
||||
pbs := resp.Proto()
|
||||
pbStruct, ok := pbs.(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, want, pbStruct)
|
||||
})
|
||||
t.Run(GetPayloadMethodV2, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
@@ -246,14 +251,15 @@ func TestClient_HTTP(t *testing.T) {
|
||||
resp, _, override, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, override)
|
||||
pb, err := resp.PbCapella()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.ExecutionPayload.BlockHash.Bytes(), pb.BlockHash)
|
||||
require.DeepEqual(t, want.ExecutionPayload.StateRoot.Bytes(), pb.StateRoot)
|
||||
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), pb.ParentHash)
|
||||
require.DeepEqual(t, want.ExecutionPayload.FeeRecipient.Bytes(), pb.FeeRecipient)
|
||||
require.DeepEqual(t, want.ExecutionPayload.PrevRandao.Bytes(), pb.PrevRandao)
|
||||
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), pb.ParentHash)
|
||||
pbs := resp.Proto()
|
||||
ep, ok := pbs.(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, want.ExecutionPayload.BlockHash.Bytes(), ep.BlockHash)
|
||||
require.DeepEqual(t, want.ExecutionPayload.StateRoot.Bytes(), ep.StateRoot)
|
||||
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), ep.ParentHash)
|
||||
require.DeepEqual(t, want.ExecutionPayload.FeeRecipient.Bytes(), ep.FeeRecipient)
|
||||
require.DeepEqual(t, want.ExecutionPayload.PrevRandao.Bytes(), ep.PrevRandao)
|
||||
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), ep.ParentHash)
|
||||
|
||||
v, err := resp.ValueInGwei()
|
||||
require.NoError(t, err)
|
||||
@@ -314,6 +320,61 @@ func TestClient_HTTP(t *testing.T) {
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, blobsBundle.Blobs)
|
||||
})
|
||||
t.Run(GetPayloadMethodV4, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
want, ok := fix["ExecutionPayloadElectraWithValue"].(*pb.GetPayloadV4ResponseJson)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, blobsBundle, override, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, override)
|
||||
g, err := resp.ExcessBlobGas()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint64(3), g)
|
||||
g, err = resp.BlobGasUsed()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, blobsBundle.KzgCommitments)
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, blobsBundle.Proofs)
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, blobsBundle.Blobs)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
@@ -496,6 +557,20 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV4+" VALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV4Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" SYNCING status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -538,6 +613,20 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV4+" SYNCING status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV4Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID_BLOCK_HASH status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -580,6 +669,20 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV4+" INVALID_BLOCK_HASH status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV4Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -622,6 +725,20 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV4+" INVALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV4Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" UNKNOWN status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -1348,6 +1465,62 @@ func fixtures() map[string]interface{} {
|
||||
BlobGasUsed: 2,
|
||||
ExcessBlobGas: 3,
|
||||
}
|
||||
withdrawalRequests := make([]pb.WithdrawalRequestV1, 3)
|
||||
for i := range withdrawalRequests {
|
||||
amount := hexutil.Uint64(i)
|
||||
address := &common.Address{}
|
||||
address.SetBytes([]byte{0, 0, byte(i)})
|
||||
pubkey := pb.BlsPubkey{}
|
||||
copy(pubkey[:], []byte{0, byte(i)})
|
||||
withdrawalRequests[i] = pb.WithdrawalRequestV1{
|
||||
SourceAddress: address,
|
||||
ValidatorPubkey: &pubkey,
|
||||
Amount: &amount,
|
||||
}
|
||||
}
|
||||
depositRequests := make([]pb.DepositRequestV1, 3)
|
||||
for i := range depositRequests {
|
||||
amount := hexutil.Uint64(math.MaxUint16 - i)
|
||||
creds := &common.Hash{}
|
||||
creds.SetBytes([]byte{0, 0, byte(i)})
|
||||
pubkey := pb.BlsPubkey{}
|
||||
copy(pubkey[:], []byte{0, byte(i)})
|
||||
sig := pb.BlsSig{}
|
||||
copy(sig[:], []byte{0, 0, 0, byte(i)})
|
||||
idx := hexutil.Uint64(i)
|
||||
depositRequests[i] = pb.DepositRequestV1{
|
||||
PubKey: &pubkey,
|
||||
WithdrawalCredentials: creds,
|
||||
Amount: &amount,
|
||||
Signature: &sig,
|
||||
Index: &idx,
|
||||
}
|
||||
}
|
||||
outer := &pb.ExecutionPayloadElectraJSON{
|
||||
WithdrawalRequests: withdrawalRequests,
|
||||
DepositRequests: depositRequests,
|
||||
}
|
||||
executionPayloadFixtureElectra := &pb.ExecutionPayloadElectra{
|
||||
ParentHash: foo[:],
|
||||
FeeRecipient: bar,
|
||||
StateRoot: foo[:],
|
||||
ReceiptsRoot: foo[:],
|
||||
LogsBloom: baz,
|
||||
PrevRandao: foo[:],
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: foo[:],
|
||||
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
|
||||
BlockHash: foo[:],
|
||||
Transactions: [][]byte{foo[:]},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
BlobGasUsed: 2,
|
||||
ExcessBlobGas: 3,
|
||||
DepositReceipts: outer.ElectraDepositReceipts(),
|
||||
WithdrawalRequests: outer.ElectraExecutionLayerWithdrawalRequests(),
|
||||
}
|
||||
hexUint := hexutil.Uint64(1)
|
||||
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
|
||||
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
|
||||
@@ -1397,6 +1570,33 @@ func fixtures() map[string]interface{} {
|
||||
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
|
||||
},
|
||||
}
|
||||
executionPayloadWithValueFixtureElectra := &pb.GetPayloadV4ResponseJson{
|
||||
ShouldOverrideBuilder: true,
|
||||
ExecutionPayload: &pb.ExecutionPayloadElectraJSON{
|
||||
ParentHash: &common.Hash{'a'},
|
||||
FeeRecipient: &common.Address{'b'},
|
||||
StateRoot: &common.Hash{'c'},
|
||||
ReceiptsRoot: &common.Hash{'d'},
|
||||
LogsBloom: &hexutil.Bytes{'e'},
|
||||
PrevRandao: &common.Hash{'f'},
|
||||
BaseFeePerGas: "0x123",
|
||||
BlockHash: &common.Hash{'g'},
|
||||
Transactions: []hexutil.Bytes{{'h'}},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
BlockNumber: &hexUint,
|
||||
GasLimit: &hexUint,
|
||||
GasUsed: &hexUint,
|
||||
Timestamp: &hexUint,
|
||||
BlobGasUsed: &bgu,
|
||||
ExcessBlobGas: &ebg,
|
||||
},
|
||||
BlockValue: "0x11fffffffff",
|
||||
BlobsBundle: &pb.BlobBundleJSON{
|
||||
Commitments: []hexutil.Bytes{[]byte("commitment1"), []byte("commitment2")},
|
||||
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
|
||||
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
|
||||
},
|
||||
}
|
||||
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
|
||||
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
|
||||
@@ -1487,8 +1687,10 @@ func fixtures() map[string]interface{} {
|
||||
"ExecutionPayload": executionPayloadFixture,
|
||||
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
|
||||
"ExecutionPayloadDeneb": executionPayloadFixtureDeneb,
|
||||
"ExecutionPayloadElectra": executionPayloadFixtureElectra,
|
||||
"ExecutionPayloadCapellaWithValue": executionPayloadWithValueFixtureCapella,
|
||||
"ExecutionPayloadDenebWithValue": executionPayloadWithValueFixtureDeneb,
|
||||
"ExecutionPayloadElectraWithValue": executionPayloadWithValueFixtureElectra,
|
||||
"ValidPayloadStatus": validStatus,
|
||||
"InvalidBlockHashStatus": inValidBlockHashStatus,
|
||||
"AcceptedStatus": acceptedStatus,
|
||||
@@ -1502,189 +1704,6 @@ func fixtures() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
type args struct {
|
||||
header *pb.ExecutionPayloadHeader
|
||||
block *pb.ExecutionBlock
|
||||
version int
|
||||
}
|
||||
wantedHash := common.BytesToHash([]byte("foo"))
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want func() interfaces.ExecutionData
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "block hash field in header and block hash mismatch",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeader{
|
||||
BlockHash: []byte("foo"),
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("bar")),
|
||||
},
|
||||
version: version.Bellatrix,
|
||||
},
|
||||
err: "does not match execution block hash",
|
||||
},
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeader{
|
||||
BlockHash: wantedHash[:],
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: wantedHash,
|
||||
},
|
||||
version: version.Bellatrix,
|
||||
},
|
||||
want: func() interfaces.ExecutionData {
|
||||
p, err := blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
|
||||
BlockHash: wantedHash[:],
|
||||
Transactions: [][]byte{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return p
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeader(tt.args.header)
|
||||
require.NoError(t, err)
|
||||
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
|
||||
if err != nil {
|
||||
assert.ErrorContains(t, tt.err, err)
|
||||
} else {
|
||||
assert.DeepEqual(t, tt.want(), got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
|
||||
type args struct {
|
||||
header *pb.ExecutionPayloadHeaderCapella
|
||||
block *pb.ExecutionBlock
|
||||
version int
|
||||
}
|
||||
wantedHash := common.BytesToHash([]byte("foo"))
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want func() interfaces.ExecutionData
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "block hash field in header and block hash mismatch",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeaderCapella{
|
||||
BlockHash: []byte("foo"),
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("bar")),
|
||||
},
|
||||
version: version.Capella,
|
||||
},
|
||||
err: "does not match execution block hash",
|
||||
},
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeaderCapella{
|
||||
BlockHash: wantedHash[:],
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: wantedHash,
|
||||
},
|
||||
version: version.Capella,
|
||||
},
|
||||
want: func() interfaces.ExecutionData {
|
||||
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
|
||||
BlockHash: wantedHash[:],
|
||||
Transactions: [][]byte{},
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
return p
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
|
||||
if err != nil {
|
||||
assert.ErrorContains(t, tt.err, err)
|
||||
} else {
|
||||
assert.DeepEqual(t, tt.want(), got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
|
||||
type args struct {
|
||||
header *pb.ExecutionPayloadHeaderDeneb
|
||||
block *pb.ExecutionBlock
|
||||
version int
|
||||
}
|
||||
wantedHash := common.BytesToHash([]byte("foo"))
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want func() interfaces.ExecutionData
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "block hash field in header and block hash mismatch",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeaderDeneb{
|
||||
BlockHash: []byte("foo"),
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("bar")),
|
||||
},
|
||||
version: version.Deneb,
|
||||
},
|
||||
err: "does not match execution block hash",
|
||||
},
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeaderDeneb{
|
||||
BlockHash: wantedHash[:],
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: wantedHash,
|
||||
},
|
||||
version: version.Deneb,
|
||||
},
|
||||
want: func() interfaces.ExecutionData {
|
||||
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
|
||||
BlockHash: wantedHash[:],
|
||||
Transactions: [][]byte{},
|
||||
}, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
return p
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, big.NewInt(0))
|
||||
require.NoError(t, err)
|
||||
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
|
||||
if err != nil {
|
||||
assert.ErrorContains(t, tt.err, err)
|
||||
} else {
|
||||
assert.DeepEqual(t, tt.want(), got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderByHash_NotFound(t *testing.T) {
|
||||
srv := &Service{}
|
||||
srv.rpcClient = RPCClientBad{}
|
||||
@@ -2031,6 +2050,40 @@ func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
|
||||
return service
|
||||
}
|
||||
|
||||
func newPayloadV4Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadElectra) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(payload)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": status,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}

func TestCapella_PayloadBodiesByHash(t *testing.T) {
    t.Run("empty response works", func(t *testing.T) {
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -26,6 +26,7 @@ type EngineClient struct {
    ExecutionPayload        *pb.ExecutionPayload
    ExecutionPayloadCapella *pb.ExecutionPayloadCapella
    ExecutionPayloadDeneb   *pb.ExecutionPayloadDeneb
    ExecutionPayloadElectra *pb.ExecutionPayloadElectra
    ExecutionBlock          *pb.ExecutionBlock
    Err                     error
    ErrLatestExecBlock      error
@@ -61,6 +62,13 @@ func (e *EngineClient) ForkchoiceUpdated(

// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
    if slots.ToEpoch(s) >= params.BeaconConfig().ElectraForkEpoch {
        ed, err := blocks.WrappedExecutionPayloadElectra(e.ExecutionPayloadElectra, big.NewInt(int64(e.BlockValue)))
        if err != nil {
            return nil, nil, false, err
        }
        return ed, e.BlobsBundle, e.BuilderOverride, nil
    }
    if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
        ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, big.NewInt(int64(e.BlockValue)))
        if err != nil {

@@ -33,12 +33,12 @@ func (s *Service) canUpdateAttestedValidator(idx primitives.ValidatorIndex, slot
}

// attestingIndices returns the indices of validators that participated in the given aggregated attestation.
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
    committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
func attestingIndices(ctx context.Context, state state.BeaconState, att interfaces.Attestation) ([]uint64, error) {
    committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
    if err != nil {
        return nil, err
    }
    return attestation.AttestingIndices(att.AggregationBits, committee)
    return attestation.AttestingIndices(att, committee)
}

// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
@@ -63,7 +63,7 @@ func (s *Service) processAttestations(ctx context.Context, state state.BeaconSta
}

// processIncludedAttestation logs in the event for the tracked validators' and their latest attestation gets processed.
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att interfaces.Attestation) {
    attestingIndices, err := attestingIndices(ctx, state, att)
    if err != nil {
        log.WithError(err).Error("Could not get attesting indices")
@@ -72,8 +72,8 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
    s.Lock()
    defer s.Unlock()
    for _, idx := range attestingIndices {
        if s.canUpdateAttestedValidator(primitives.ValidatorIndex(idx), att.Data.Slot) {
            logFields := logMessageTimelyFlagsForIndex(primitives.ValidatorIndex(idx), att.Data)
        if s.canUpdateAttestedValidator(primitives.ValidatorIndex(idx), att.GetData().Slot) {
            logFields := logMessageTimelyFlagsForIndex(primitives.ValidatorIndex(idx), att.GetData())
            balance, err := state.BalanceAtIndex(primitives.ValidatorIndex(idx))
            if err != nil {
                log.WithError(err).Error("Could not get balance")
@@ -88,7 +88,7 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
            balanceChg := int64(balance - latestPerf.balance)
            latestPerf.balanceChange = balanceChg
            latestPerf.balance = balance
            latestPerf.attestedSlot = att.Data.Slot
            latestPerf.attestedSlot = att.GetData().Slot
            latestPerf.inclusionSlot = state.Slot()
            inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
            aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)
@@ -161,10 +161,10 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
}

// processUnaggregatedAttestation logs when the beacon node observes an unaggregated attestation from tracked validator.
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att interfaces.Attestation) {
    s.RLock()
    defer s.RUnlock()
    root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
    root := bytesutil.ToBytes32(att.GetData().BeaconBlockRoot)
    st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
    if st == nil {
        log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
@@ -177,8 +177,8 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
        return
    }
    for _, idx := range attestingIndices {
        if s.canUpdateAttestedValidator(primitives.ValidatorIndex(idx), att.Data.Slot) {
            logFields := logMessageTimelyFlagsForIndex(primitives.ValidatorIndex(idx), att.Data)
        if s.canUpdateAttestedValidator(primitives.ValidatorIndex(idx), att.GetData().Slot) {
            logFields := logMessageTimelyFlagsForIndex(primitives.ValidatorIndex(idx), att.GetData())
            log.WithFields(logFields).Info("Processed unaggregated attestation")
        }
    }

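The attestingIndices doc comment above describes mapping an attestation's aggregation bits onto the beacon committee for its slot and committee index. A generic, hedged sketch of that mapping, independent of Prysm's helpers (the names here are illustrative only):

package main

import "fmt"

// attestingIndicesSketch returns committee[i] for every set bit i.
// It assumes the bitfield length matches the committee size.
func attestingIndicesSketch(bits []bool, committee []uint64) []uint64 {
    indices := make([]uint64, 0, len(committee))
    for i, set := range bits {
        if set && i < len(committee) {
            indices = append(indices, committee[i])
        }
    }
    return indices
}

func main() {
    committee := []uint64{10, 11, 12, 13}
    bits := []bool{true, false, true, false}
    fmt.Println(attestingIndicesSketch(bits, committee)) // [10 12]
}
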
@@ -124,14 +124,14 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
            log.WithFields(logrus.Fields{
                "attesterIndex":      idx,
                "blockInclusionSlot": blk.Slot(),
                "attestationSlot1":   slashing.Attestation_1.Data.Slot,
                "beaconBlockRoot1":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
                "sourceEpoch1":       slashing.Attestation_1.Data.Source.Epoch,
                "targetEpoch1":       slashing.Attestation_1.Data.Target.Epoch,
                "attestationSlot2":   slashing.Attestation_2.Data.Slot,
                "beaconBlockRoot2":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
                "sourceEpoch2":       slashing.Attestation_2.Data.Source.Epoch,
                "targetEpoch2":       slashing.Attestation_2.Data.Target.Epoch,
                "attestationSlot1":   slashing.GetFirstAttestation().GetData().Slot,
                "beaconBlockRoot1":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.GetFirstAttestation().GetData().BeaconBlockRoot)),
                "sourceEpoch1":       slashing.GetFirstAttestation().GetData().Source.Epoch,
                "targetEpoch1":       slashing.GetFirstAttestation().GetData().Target.Epoch,
                "attestationSlot2":   slashing.GetSecondAttestation().GetData().Slot,
                "beaconBlockRoot2":   fmt.Sprintf("%#x", bytesutil.Trunc(slashing.GetSecondAttestation().GetData().BeaconBlockRoot)),
                "sourceEpoch2":       slashing.GetSecondAttestation().GetData().Source.Epoch,
                "targetEpoch2":       slashing.GetSecondAttestation().GetData().Target.Epoch,
            }).Info("Attester slashing was included")
        }
    }

@@ -20,9 +20,9 @@ go_library(
        "//cache/lru:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/hash:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
@@ -49,6 +49,7 @@ go_test(
        "//beacon-chain/operations/attestations/kv:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -15,9 +15,9 @@ go_library(
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/hash:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
        "@com_github_patrickmn_go_cache//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -39,6 +39,7 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//config/fieldparams:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//crypto/bls:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",

@@ -7,8 +7,8 @@ import (

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
    log "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
@@ -28,13 +28,13 @@ func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error
    return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
}

func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []interfaces.Attestation) error {
    _, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
    defer span.End()

    attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(unaggregatedAtts))
    attsByDataRoot := make(map[[32]byte][]interfaces.Attestation, len(unaggregatedAtts))
    for _, att := range unaggregatedAtts {
        attDataRoot, err := att.Data.HashTreeRoot()
        attDataRoot, err := att.GetData().HashTreeRoot()
        if err != nil {
            return err
        }
@@ -66,12 +66,12 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
// returns the unaggregated attestations that weren't able to aggregate.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]interfaces.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
    var leftoverLock sync.Mutex
    wg := sync.WaitGroup{}

    n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
    ch := make(chan []*ethpb.Attestation, n)
    ch := make(chan []interfaces.Attestation, n)
    wg.Add(n)
    for i := 0; i < n; i++ {
        go func() {
@@ -87,7 +87,7 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, le
                continue
            }
            if helpers.IsAggregated(aggregated) {
                if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
                if err := c.SaveAggregatedAttestations([]interfaces.Attestation{aggregated}); err != nil {
                    log.WithError(err).Error("could not save aggregated attestation")
                    continue
                }
@@ -116,7 +116,7 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, le
}

// SaveAggregatedAttestation saves an aggregated attestation in cache.
func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
func (c *AttCaches) SaveAggregatedAttestation(att interfaces.Attestation) error {
    if err := helpers.ValidateNilAttestation(att); err != nil {
        return err
    }
@@ -139,16 +139,16 @@ func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
        return nil
    }

    r, err := hashFn(att.Data)
    r, err := hashFn(att.GetData())
    if err != nil {
        return errors.Wrap(err, "could not tree hash attestation")
    }
    copiedAtt := ethpb.CopyAttestation(att)
    copiedAtt := interfaces.CopyAttestation(att)
    c.aggregatedAttLock.Lock()
    defer c.aggregatedAttLock.Unlock()
    atts, ok := c.aggregatedAtt[r]
    if !ok {
        atts := []*ethpb.Attestation{copiedAtt}
        atts := []interfaces.Attestation{copiedAtt}
        c.aggregatedAtt[r] = atts
        return nil
    }
@@ -163,7 +163,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
}

// SaveAggregatedAttestations saves a list of aggregated attestations in cache.
func (c *AttCaches) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
func (c *AttCaches) SaveAggregatedAttestations(atts []interfaces.Attestation) error {
    for _, att := range atts {
        if err := c.SaveAggregatedAttestation(att); err != nil {
            log.WithError(err).Debug("Could not save aggregated attestation")
@@ -176,11 +176,11 @@ func (c *AttCaches) SaveAggregatedAttestations(atts []*ethpb.Attestation) error
}

// AggregatedAttestations returns the aggregated attestations in cache.
func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
func (c *AttCaches) AggregatedAttestations() []interfaces.Attestation {
    c.aggregatedAttLock.RLock()
    defer c.aggregatedAttLock.RUnlock()

    atts := make([]*ethpb.Attestation, 0)
    atts := make([]interfaces.Attestation, 0)

    for _, a := range c.aggregatedAtt {
        atts = append(atts, a...)
@@ -191,16 +191,16 @@ func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {

// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation {
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []interfaces.Attestation {
    _, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
    defer span.End()

    atts := make([]*ethpb.Attestation, 0)
    atts := make([]interfaces.Attestation, 0)

    c.aggregatedAttLock.RLock()
    defer c.aggregatedAttLock.RUnlock()
    for _, a := range c.aggregatedAtt {
        if slot == a[0].Data.Slot && committeeIndex == a[0].Data.CommitteeIndex {
        if slot == a[0].GetData().Slot && committeeIndex == a[0].GetData().CommitteeIndex {
            atts = append(atts, a...)
        }
    }
@@ -209,14 +209,14 @@ func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot
}

// DeleteAggregatedAttestation deletes the aggregated attestations in cache.
func (c *AttCaches) DeleteAggregatedAttestation(att *ethpb.Attestation) error {
func (c *AttCaches) DeleteAggregatedAttestation(att interfaces.Attestation) error {
    if err := helpers.ValidateNilAttestation(att); err != nil {
        return err
    }
    if !helpers.IsAggregated(att) {
        return errors.New("attestation is not aggregated")
    }
    r, err := hashFn(att.Data)
    r, err := hashFn(att.GetData())
    if err != nil {
        return errors.Wrap(err, "could not tree hash attestation data")
    }
@@ -232,9 +232,9 @@ func (c *AttCaches) DeleteAggregatedAttestation(att *ethpb.Attestation) error {
        return nil
    }

    filtered := make([]*ethpb.Attestation, 0)
    filtered := make([]interfaces.Attestation, 0)
    for _, a := range attList {
        if c, err := att.AggregationBits.Contains(a.AggregationBits); err != nil {
        if c, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
            return err
        } else if !c {
            filtered = append(filtered, a)
@@ -250,11 +250,11 @@ func (c *AttCaches) DeleteAggregatedAttestation(att *ethpb.Attestation) error {
}

// HasAggregatedAttestation checks if the input attestations has already existed in cache.
func (c *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, error) {
func (c *AttCaches) HasAggregatedAttestation(att interfaces.Attestation) (bool, error) {
    if err := helpers.ValidateNilAttestation(att); err != nil {
        return false, err
    }
    r, err := hashFn(att.Data)
    r, err := hashFn(att.GetData())
    if err != nil {
        return false, errors.Wrap(err, "could not tree hash attestation")
    }
@@ -263,7 +263,7 @@ func (c *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, erro
    defer c.aggregatedAttLock.RUnlock()
    if atts, ok := c.aggregatedAtt[r]; ok {
        for _, a := range atts {
            if c, err := a.AggregationBits.Contains(att.AggregationBits); err != nil {
            if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
                return false, err
            } else if c {
                return true, nil
@@ -275,7 +275,7 @@ func (c *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, erro
    defer c.blockAttLock.RUnlock()
    if atts, ok := c.blockAtt[r]; ok {
        for _, a := range atts {
            if c, err := a.AggregationBits.Contains(att.AggregationBits); err != nil {
            if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
                return false, err
            } else if c {
                return true, nil

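The aggregateParallel doc comment above describes the fan-out used by the pool: one channel sized to GOMAXPROCS and that many worker goroutines draining it. A self-contained sketch of that pattern with purely illustrative types and values (summing stands in for attestation aggregation; none of these names are Prysm's internals):

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    batches := [][]int{{1, 2}, {3, 4, 5}, {6}}

    n := runtime.GOMAXPROCS(0)
    ch := make(chan []int, n)
    results := make(chan int, len(batches))

    var wg sync.WaitGroup
    wg.Add(n)
    for i := 0; i < n; i++ {
        go func() {
            defer wg.Done()
            // Each worker drains the channel and "aggregates" a batch.
            for batch := range ch {
                sum := 0
                for _, v := range batch {
                    sum += v
                }
                results <- sum
            }
        }()
    }

    for _, b := range batches {
        ch <- b
    }
    close(ch)
    wg.Wait()
    close(results)

    for r := range results {
        fmt.Println(r)
    }
}
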
@@ -9,6 +9,7 @@ import (
    "github.com/pkg/errors"
    fssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/crypto/bls"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -30,7 +31,7 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
    att6 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()})
    att7 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()})
    att8 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()})
    atts := []*ethpb.Attestation{att1, att2, att3, att4, att5, att6, att7, att8}
    atts := []interfaces.Attestation{att1, att2, att3, att4, att5, att6, att7, att8}
    require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
    require.NoError(t, cache.AggregateUnaggregatedAttestations(context.Background()))

@@ -41,7 +42,7 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
    tests := []struct {
        name          string
        att           *ethpb.Attestation
        att           interfaces.Attestation
        count         int
        wantErrString string
    }{
@@ -118,13 +119,13 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
    tests := []struct {
        name          string
        atts          []*ethpb.Attestation
        atts          []interfaces.Attestation
        count         int
        wantErrString string
    }{
        {
            name: "no duplicates",
            atts: []*ethpb.Attestation{
            atts: []interfaces.Attestation{
                util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
                    AggregationBits: bitfield.Bitlist{0b1101}}),
                util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
@@ -153,13 +154,13 @@ func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_SaveAggregatedAttestations_SomeGoodSomeBad(t *testing.T) {
    tests := []struct {
        name          string
        atts          []*ethpb.Attestation
        atts          []interfaces.Attestation
        count         int
        wantErrString string
    }{
        {
            name: "the first attestation is bad",
            atts: []*ethpb.Attestation{
            atts: []interfaces.Attestation{
                util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
                    AggregationBits: bitfield.Bitlist{0b1100}}),
                util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
@@ -191,7 +192,7 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
    att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
    att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
    att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
    atts := []*ethpb.Attestation{att1, att2, att3}
    atts := []interfaces.Attestation{att1, att2, att3}

    for _, att := range atts {
        require.NoError(t, cache.SaveAggregatedAttestation(att))
@@ -199,7 +200,7 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {

    returned := cache.AggregatedAttestations()
    sort.Slice(returned, func(i, j int) bool {
        return returned[i].Data.Slot < returned[j].Data.Slot
        return returned[i].GetData().Slot < returned[j].GetData().Slot
    })
    assert.DeepSSZEqual(t, atts, returned)
}
@@ -246,13 +247,13 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
        att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11010}})
        att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11010}})
        att4 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b10101}})
        atts := []*ethpb.Attestation{att1, att2, att3, att4}
        atts := []interfaces.Attestation{att1, att2, att3, att4}
        require.NoError(t, cache.SaveAggregatedAttestations(atts))
        require.NoError(t, cache.DeleteAggregatedAttestation(att1))
        require.NoError(t, cache.DeleteAggregatedAttestation(att3))

        returned := cache.AggregatedAttestations()
        wanted := []*ethpb.Attestation{att2}
        wanted := []interfaces.Attestation{att2}
        assert.DeepEqual(t, wanted, returned)
    })

@@ -262,16 +263,16 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
        att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110111}})
        att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110100}})
        att4 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110101}})
        atts := []*ethpb.Attestation{att1, att2, att3, att4}
        atts := []interfaces.Attestation{att1, att2, att3, att4}
        require.NoError(t, cache.SaveAggregatedAttestations(atts))

        assert.Equal(t, 2, cache.AggregatedAttestationCount(), "Unexpected number of atts")
        require.NoError(t, cache.DeleteAggregatedAttestation(att4))

        returned := cache.AggregatedAttestations()
        wanted := []*ethpb.Attestation{att1, att2}
        wanted := []interfaces.Attestation{att1, att2}
        sort.Slice(returned, func(i, j int) bool {
            return string(returned[i].AggregationBits) < string(returned[j].AggregationBits)
            return string(returned[i].GetAggregationBits()) < string(returned[j].GetAggregationBits())
        })
        assert.DeepEqual(t, wanted, returned)
    })
@@ -280,7 +281,7 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
    tests := []struct {
        name     string
        existing []*ethpb.Attestation
        existing []interfaces.Attestation
        input    *ethpb.Attestation
        want     bool
        err      error
@@ -319,7 +320,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "single attestation in cache with exact match",
            existing: []*ethpb.Attestation{{
            existing: []interfaces.Attestation{&ethpb.Attestation{
                Data: util.HydrateAttestationData(&ethpb.AttestationData{
                    Slot: 1,
                }),
@@ -334,7 +335,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "single attestation in cache with subset aggregation",
            existing: []*ethpb.Attestation{{
            existing: []interfaces.Attestation{&ethpb.Attestation{
                Data: util.HydrateAttestationData(&ethpb.AttestationData{
                    Slot: 1,
                }),
@@ -349,7 +350,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "single attestation in cache with superset aggregation",
            existing: []*ethpb.Attestation{{
            existing: []interfaces.Attestation{&ethpb.Attestation{
                Data: util.HydrateAttestationData(&ethpb.AttestationData{
                    Slot: 1,
                }),
@@ -364,14 +365,14 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "multiple attestations with same data in cache with overlapping aggregation, input is subset",
            existing: []*ethpb.Attestation{
                {
            existing: []interfaces.Attestation{
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 1,
                    }),
                    AggregationBits: bitfield.Bitlist{0b1111000},
                },
                {
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 1,
                    }),
@@ -387,14 +388,14 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "multiple attestations with same data in cache with overlapping aggregation and input is superset",
            existing: []*ethpb.Attestation{
                {
            existing: []interfaces.Attestation{
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 1,
                    }),
                    AggregationBits: bitfield.Bitlist{0b1111000},
                },
                {
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 1,
                    }),
@@ -410,14 +411,14 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "multiple attestations with different data in cache",
            existing: []*ethpb.Attestation{
                {
            existing: []interfaces.Attestation{
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 2,
                    }),
                    AggregationBits: bitfield.Bitlist{0b1111000},
                },
                {
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 3,
                    }),
@@ -433,8 +434,8 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
        },
        {
            name: "attestations with different bitlist lengths",
            existing: []*ethpb.Attestation{
                {
            existing: []interfaces.Attestation{
                &ethpb.Attestation{
                    Data: util.HydrateAttestationData(&ethpb.AttestationData{
                        Slot: 2,
                    }),

Some files were not shown because too many files have changed in this diff