Compare commits


3 Commits

Author   SHA1        Message             Date
nisdas   cf0505b8db  potuz's suggestion  2023-06-19 22:19:56 +08:00
nisdas   3a9764d3af  potuz's suggestion  2023-06-19 20:01:13 +08:00
nisdas   d1d3edc7fe  time locks          2023-06-19 18:30:26 +08:00
320 changed files with 6008 additions and 34557 deletions

View File

@@ -3,7 +3,6 @@ import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
@@ -15,7 +14,7 @@ coverage --define=coverage_enabled=1
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false --define blst_modern=true
build --define blst_disabled=false
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -28,7 +27,30 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
build:llvm --incompatible_enable_cc_toolchain_resolution
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address,undefined
build:asan --copt -fno-sanitize=vptr,function
build:asan --linkopt -fno-sanitize=vptr,function
build:asan --copt -DADDRESS_SANITIZER=1
build:asan --copt -D__SANITIZE_ADDRESS__
build:asan --linkopt -ldl
build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --@io_bazel_rules_go//go/config:tags=fuzz
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
@@ -37,7 +59,7 @@ build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug=".*"
build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s

View File

@@ -1 +1 @@
6.2.1
6.1.0

View File

@@ -16,37 +16,27 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v2.0.0"
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "57f03a6c29793e8add7bd64186fc8066d23b5ffd06fe9cc6b0b8c499914d3a65",
urls = [
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
],
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
)
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
zig_toolchains()
bazel_toolchain_dependencies()
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
register_toolchains(
"@zig_sdk//toolchain:linux_amd64_gnu.2.31",
"@zig_sdk//toolchain:linux_arm64_gnu.2.31",
# Hermetic cc toolchain is not yet supported on darwin. Sysroot needs to be provided.
# See https://github.com/uber/hermetic_cc_toolchain#osx-sysroot
# "@zig_sdk//toolchain:darwin_amd64",
# "@zig_sdk//toolchain:darwin_arm64",
# Windows builds are not supported yet.
# "@zig_sdk//toolchain:windows_amd64",
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
)
load("@prysm//tools/cross-toolchain:darwin_cc_hack.bzl", "configure_nonhermetic_darwin")
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
configure_nonhermetic_darwin()
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
@@ -215,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-alpha.2"
consensus_spec_version = "v1.4.0-alpha.1"
bls_test_version = "v0.1.1"
@@ -231,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bfba887cbe043907adf884cf6d18f2e8a31e34e9245397b84af1f54ed22b706a",
sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -247,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9ff77bef0ca1e39bcee2769075c89f0f91fb8f89ad38a1b3e0c31cf6732650ad",
sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -263,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "fbcc3c9898110c675e5de9c27cb667ad7cadf930db7ebb5c6bba15d7be95bf8a",
sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -278,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9fff1bcdd0e5857797197800db091c3675b2c11b54f704fe4de1ba683bed7ba5",
sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -11,12 +11,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
@@ -40,7 +40,6 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -1,12 +1,14 @@
package builder
import (
"github.com/pkg/errors"
"math/big"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -22,7 +24,6 @@ type SignedBid interface {
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error)
Value() []byte
Pubkey() []byte
Version() int
@@ -115,11 +116,6 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// BlindedBlobsBundle --
func (b builderBid) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
@@ -166,12 +162,8 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlindedBlobsBundle --
func (b builderBidCapella) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
}
// Version --
@@ -203,90 +195,3 @@ func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidDeneb struct {
p *ethpb.BuilderBidDeneb
}
// WrappedBuilderBidDeneb is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidDeneb(p *ethpb.BuilderBidDeneb) (Bid, error) {
w := builderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Version --
func (b builderBidDeneb) Version() int {
return version.Deneb
}
// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidDeneb) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidDeneb) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidDeneb) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlindedBlobsBundle --
func (b builderBidDeneb) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return b.p.BlindedBlobsBundle, nil
}
type signedBuilderBidDeneb struct {
p *ethpb.SignedBuilderBidDeneb
}
// WrappedSignedBuilderBidDeneb is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidDeneb(p *ethpb.SignedBuilderBidDeneb) (SignedBid, error) {
w := signedBuilderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidDeneb) Message() (Bid, error) {
return WrappedBuilderBidDeneb(b.p.Message)
}
// Signature --
func (b signedBuilderBidDeneb) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidDeneb) Version() int {
return version.Deneb
}
// IsNil --
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}
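
Note: the Header() implementations above reverse the SSZ value bytes before interpreting them and convert Wei to Gwei. A minimal standalone sketch of that step, under the assumption that the stored value is little-endian; reverseBytes and the 1e9 divisor below are stand-ins for Prysm's bytesutil.ReverseByteOrder and math.WeiToGwei, and the sample bytes are made up:

package main

import (
	"fmt"
	"math/big"
)

// reverseBytes returns a reversed copy of b, standing in for bytesutil.ReverseByteOrder.
func reverseBytes(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// big.Int.SetBytes expects big-endian input, so the little-endian SSZ bytes are reversed first.
	littleEndianWei := []byte{0x00, 0xca, 0x9a, 0x3b, 0x00, 0x00, 0x00, 0x00} // 1e9 wei, little-endian
	wei := new(big.Int).SetBytes(reverseBytes(littleEndianWei))
	gwei := new(big.Int).Div(wei, big.NewInt(1_000_000_000)) // WeiToGwei: 1 gwei = 1e9 wei
	fmt.Println(wei, gwei)                                   // prints: 1000000000 1
}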

View File

@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
log "github.com/sirupsen/logrus"
@@ -87,7 +86,7 @@ type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
Status(ctx context.Context) error
}
@@ -221,16 +220,6 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
@@ -285,20 +274,20 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if !sb.IsBlinded() {
return nil, nil, errNotBlinded
return nil, errNotBlinded
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
versionOpt := func(r *http.Request) {
@@ -307,33 +296,29 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, nil, errors.New("not a bellatrix payload")
return nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayload(p)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
return blocks.WrappedExecutionPayload(p)
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
return nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
versionOpt := func(r *http.Request) {
@@ -342,58 +327,22 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, nil, errors.New("not a capella payload")
return nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Deneb:
psb, err := sb.PbBlindedDenebBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{Block: psb, Blobs: blobs}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
ep := &ExecPayloadResponseDeneb{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
}
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
return nil, nil, errors.New("not a deneb payload")
}
p, blobBundle, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, blobBundle, nil
return blocks.WrappedExecutionPayloadCapella(p, 0)
default:
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}
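
Note: SubmitBlindedBlock tags each request with the fork it encodes via the versionOpt closures above. A hedged standalone sketch of that request shape, using only net/http; the localhost URL and empty body are assumptions, while the path and header name are the ones appearing in this diff:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder body; the real client marshals a SignedBlindedBeaconBlock* JSON value here.
	body := bytes.NewBufferString(`{}`)
	req, err := http.NewRequest(http.MethodPost, "http://localhost:3500/eth/v1/builder/blinded_blocks", body)
	if err != nil {
		panic(err)
	}
	// Mirror of the versionOpt closure: advertise which consensus fork the payload encodes.
	req.Header.Add("Eth-Consensus-Version", "capella")
	fmt.Println(req.Method, req.URL.Path, req.Header.Get("Eth-Consensus-Version"))
}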

View File

@@ -12,9 +12,7 @@ import (
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -127,6 +125,7 @@ func TestClient_GetHeader(t *testing.T) {
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -237,52 +236,6 @@ func TestClient_GetHeader(t *testing.T) {
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
bundle, err := bid.BlindedBlobsBundle()
require.NoError(t, err)
require.Equal(t, len(bundle.BlobRoots) <= fieldparams.MaxBlobsPerBlock && len(bundle.BlobRoots) > 0, true)
for i := range bundle.BlobRoots {
require.Equal(t, len(bundle.BlobRoots[i]) == fieldparams.RootLength, true)
}
require.Equal(t, len(bundle.KzgCommitments) > 0, true)
for i := range bundle.KzgCommitments {
require.Equal(t, len(bundle.KzgCommitments[i]) == 48, true)
}
require.Equal(t, len(bundle.Proofs) > 0, true)
for i := range bundle.Proofs {
require.Equal(t, len(bundle.Proofs[i]) == 48, true)
}
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -324,7 +277,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb, nil)
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
@@ -350,7 +303,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
require.NoError(t, err)
ep, _, err := c.SubmitBlindedBlock(ctx, sbb, nil)
ep, err := c.SubmitBlindedBlock(ctx, sbb)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
@@ -360,40 +313,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
sbb, err := blocks.NewSignedBeaconBlock(test.Block)
require.NoError(t, err)
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.Blobs)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
require.NotNil(t, blobBundle)
require.Equal(t, hexutil.Encode(blobBundle.Blobs[0]), hexutil.Encode(make([]byte, fieldparams.BlobLength)))
require.Equal(t, hexutil.Encode(blobBundle.KzgCommitments[0]), "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
require.Equal(t, hexutil.Encode(blobBundle.Proofs[0]), "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -411,13 +330,13 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, _, err = c.SubmitBlindedBlock(ctx, sbbb, nil)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb, nil)
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
require.ErrorIs(t, err, errNotBlinded)
})
}
@@ -707,168 +626,6 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
}
}
func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
return &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
Block: &eth.SignedBlindedBeaconBlockDeneb{
Block: &eth.BlindedBeaconBlockDeneb{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyDeneb{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DataGasUsed: 1,
ExcessDataGas: 1,
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Blobs: []*eth.SignedBlindedBlobSidecar{
{
Message: &eth.BlindedBlobSidecar{
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Index: 0,
Slot: 1,
BlockParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ProposerIndex: 1,
BlobRoot: ezDecode(t, "0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"),
KzgCommitment: ezDecode(t, "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"),
KzgProof: ezDecode(t, "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
}
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)

View File

@@ -10,7 +10,6 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -41,8 +40,8 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
return nil, nil, nil
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
}
// Status --

View File

@@ -8,7 +8,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
@@ -1108,295 +1107,6 @@ func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
})
}
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseDeneb struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidDeneb `json:"message"`
} `json:"data"`
}
// ToProto creates a SignedBuilderBidDeneb Proto from ExecHeaderResponseDeneb.
func (ehr *ExecHeaderResponseDeneb) ToProto() (*eth.SignedBuilderBidDeneb, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidDeneb{
Message: bb,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
}, nil
}
// ToProto creates a BuilderBidDeneb Proto from BuilderBidDeneb.
func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
bundle, err := bb.BlindedBlobsBundle.ToProto()
if err != nil {
return nil, err
}
return &eth.BuilderBidDeneb{
Header: header,
BlindedBlobsBundle: bundle,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
// BuilderBidDeneb is a field of ExecHeaderResponseDeneb.
type BuilderBidDeneb struct {
Header *ExecutionPayloadHeaderDeneb `json:"header"`
BlindedBlobsBundle *BlindedBlobsBundle `json:"blinded_blobs_bundle"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// BlindedBlobsBundle is a field of BuilderBidDeneb and represents the blinded blobs of the associated header.
type BlindedBlobsBundle struct {
KzgCommitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
BlobRoots []hexutil.Bytes `json:"blob_roots"`
}
// ToProto creates a BlindedBlobsBundle Proto from BlindedBlobsBundle.
func (r *BlindedBlobsBundle) ToProto() (*v1.BlindedBlobsBundle, error) {
kzg := make([][]byte, len(r.KzgCommitments))
for i := range kzg {
kzg[i] = bytesutil.SafeCopyBytes(r.KzgCommitments[i])
}
proofs := make([][]byte, len(r.Proofs))
for i := range proofs {
proofs[i] = bytesutil.SafeCopyBytes(r.Proofs[i])
}
blobRoots := make([][]byte, len(r.BlobRoots))
for i := range blobRoots {
blobRoots[i] = bytesutil.SafeCopyBytes(r.BlobRoots[i])
}
return &v1.BlindedBlobsBundle{
KzgCommitments: kzg,
Proofs: proofs,
BlobRoots: blobRoots,
}, nil
}
// ExecutionPayloadHeaderDeneb is a field of the BuilderBidDeneb.
type ExecutionPayloadHeaderDeneb struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
WithdrawalsRoot hexutil.Bytes `json:"withdrawals_root"`
DataGasUsed Uint64String `json:"data_gas_used"` // new in deneb
ExcessDataGas Uint64String `json:"excess_data_gas"` // new in deneb
*v1.ExecutionPayloadHeaderDeneb
}
// MarshalJSON returns a JSON byte array representing the ExecutionPayloadHeaderDeneb struct.
func (h *ExecutionPayloadHeaderDeneb) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderDeneb
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderDeneb.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeaderDeneb.ParentHash,
FeeRecipient: h.ExecutionPayloadHeaderDeneb.FeeRecipient,
StateRoot: h.ExecutionPayloadHeaderDeneb.StateRoot,
ReceiptsRoot: h.ExecutionPayloadHeaderDeneb.ReceiptsRoot,
LogsBloom: h.ExecutionPayloadHeaderDeneb.LogsBloom,
PrevRandao: h.ExecutionPayloadHeaderDeneb.PrevRandao,
BlockNumber: Uint64String(h.ExecutionPayloadHeaderDeneb.BlockNumber),
GasLimit: Uint64String(h.ExecutionPayloadHeaderDeneb.GasLimit),
GasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeaderDeneb.Timestamp),
ExtraData: h.ExecutionPayloadHeaderDeneb.ExtraData,
BaseFeePerGas: baseFeePerGas,
BlockHash: h.ExecutionPayloadHeaderDeneb.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeaderDeneb.TransactionsRoot,
WithdrawalsRoot: h.ExecutionPayloadHeaderDeneb.WithdrawalsRoot,
DataGasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.DataGasUsed),
ExcessDataGas: Uint64String(h.ExecutionPayloadHeaderDeneb.ExcessDataGas),
})
}
// UnmarshalJSON takes in a byte array and unmarshals the value into ExecutionPayloadHeaderDeneb.
func (h *ExecutionPayloadHeaderDeneb) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderDeneb
uc := &UnmarshalCaller{}
if err := json.Unmarshal(b, uc); err != nil {
return err
}
ep := ExecutionPayloadHeaderDeneb(*uc)
*h = ep
var err error
h.ExecutionPayloadHeaderDeneb, err = h.ToProto()
return err
}
// ToProto returns a ExecutionPayloadHeaderDeneb Proto object.
func (h *ExecutionPayloadHeaderDeneb) ToProto() (*v1.ExecutionPayloadHeaderDeneb, error) {
return &v1.ExecutionPayloadHeaderDeneb{
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
DataGasUsed: uint64(h.DataGasUsed),
ExcessDataGas: uint64(h.ExcessDataGas),
}, nil
}
// ExecPayloadResponseDeneb is the response to the builder API /eth/v1/builder/blinded_blocks that includes the version, the execution payload object, and the blobs bundle object.
type ExecPayloadResponseDeneb struct {
Version string `json:"version"`
Data *ExecutionPayloadDenebAndBlobsBundle `json:"data"`
}
// ExecutionPayloadDenebAndBlobsBundle is the main field used in ExecPayloadResponseDeneb.
type ExecutionPayloadDenebAndBlobsBundle struct {
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BlobsBundle *BlobsBundle `json:"blobs_bundle"`
}
// ExecutionPayloadDeneb is a field used in ExecutionPayloadDenebAndBlobsBundle.
type ExecutionPayloadDeneb struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
Withdrawals []Withdrawal `json:"withdrawals"`
DataGasUsed Uint64String `json:"data_gas_used"` // new in deneb
ExcessDataGas Uint64String `json:"excess_data_gas"` // new in deneb
}
// BlobsBundle is a field in ExecutionPayloadDenebAndBlobsBundle.
type BlobsBundle struct {
Commitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
Blobs []hexutil.Bytes `json:"blobs"`
}
// ToProto returns a BlobsBundle Proto.
func (b BlobsBundle) ToProto() (*v1.BlobsBundle, error) {
commitments := make([][]byte, len(b.Commitments))
for i := range b.Commitments {
if len(b.Commitments[i]) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("commitment length %d is not %d", len(b.Commitments[i]), fieldparams.BLSPubkeyLength)
}
commitments[i] = bytesutil.SafeCopyBytes(b.Commitments[i])
}
proofs := make([][]byte, len(b.Proofs))
for i := range b.Proofs {
if len(b.Proofs[i]) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("proof length %d is not %d", len(b.Proofs[i]), fieldparams.BLSPubkeyLength)
}
proofs[i] = bytesutil.SafeCopyBytes(b.Proofs[i])
}
if len(b.Blobs) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("blobs length %d is more than max %d", len(b.Blobs), fieldparams.MaxBlobsPerBlock)
}
blobs := make([][]byte, len(b.Blobs))
for i := range b.Blobs {
if len(b.Blobs[i]) != fieldparams.BlobLength {
return nil, fmt.Errorf("blob length %d is not %d", len(b.Blobs[i]), fieldparams.BlobLength)
}
blobs[i] = bytesutil.SafeCopyBytes(b.Blobs[i])
}
return &v1.BlobsBundle{
KzgCommitments: commitments,
Proofs: proofs,
Blobs: blobs,
}, nil
}
// ToProto returns ExecutionPayloadDeneb Proto and BlobsBundle Proto separately.
func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.BlobsBundle, error) {
if r.Data == nil {
return nil, nil, errors.New("data field in response is empty")
}
payload, err := r.Data.ExecutionPayload.ToProto()
if err != nil {
return nil, nil, err
}
bundle, err := r.Data.BlobsBundle.ToProto()
if err != nil {
return nil, nil, err
}
return payload, bundle, nil
}
// ToProto returns the ExecutionPayloadDeneb Proto.
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: bytesutil.SafeCopyBytes(w.Address),
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadDeneb{
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
DataGasUsed: uint64(p.DataGasUsed),
ExcessDataGas: uint64(p.ExcessDataGas),
}, nil
}
// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`

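Note: for orientation, a hedged sketch of how the Deneb header response JSON (the testExampleHeaderResponseDeneb fixture in the next file) nests onto the structs removed above; the trimmed local type below is a simplified stand-in for ExecHeaderResponseDeneb/BuilderBidDeneb, and the sample values are shortened placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

// headerResponse keeps only enough fields to show the version/data/message nesting.
type headerResponse struct {
	Version string `json:"version"`
	Data    struct {
		Message struct {
			Value  string `json:"value"`
			Pubkey string `json:"pubkey"`
		} `json:"message"`
		Signature string `json:"signature"`
	} `json:"data"`
}

func main() {
	raw := `{"version":"deneb","data":{"message":{"value":"652312848583266388373324160190187140051835877600158453279131187530910662656","pubkey":"0x93247f"},"signature":"0x1b66ac"}}`
	hr := &headerResponse{}
	if err := json.Unmarshal([]byte(raw), hr); err != nil {
		panic(err)
	}
	fmt.Println(hr.Version, hr.Data.Message.Value) // deneb 652312848583266388...
}
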
View File

@@ -14,7 +14,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -117,47 +116,6 @@ var testExampleHeaderResponseCapella = `{
}
}`
var testExampleHeaderResponseDeneb = `{
"version": "deneb",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"data_gas_used": "1",
"excess_data_gas": "1"
},
"blinded_blobs_bundle": {
"commitments": [
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
],
"proofs": [
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
],
"blob_roots": [
"0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"
]
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
var testExampleHeaderResponseUnknownVersion = `{
"version": "bad",
"data": {
@@ -560,51 +518,6 @@ var testExampleExecutionPayloadCapella = `{
}
}`
var testExampleExecutionPayloadDeneb = fmt.Sprintf(`{
"version": "deneb",
"data": {
"execution_payload":{
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions": [
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
],
"withdrawals": [
{
"index": "1",
"validator_index": "1",
"address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
"amount": "1"
}
],
"data_gas_used": "2",
"excess_data_gas": "3"
},
"blobs_bundle": {
"commitments": [
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
],
"proofs": [
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
],
"blobs": [
"%s"
]
}
}
}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
epr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
@@ -776,107 +689,6 @@ func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
assert.Equal(t, uint64(1), w.Amount.Uint64())
}
func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
epr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
cases := []struct {
expected string
actual string
name string
}{
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ParentHash),
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
actual: hexutil.Encode(epr.Data.ExecutionPayload.FeeRecipient),
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.StateRoot),
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ReceiptsRoot),
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
actual: hexutil.Encode(epr.Data.ExecutionPayload.LogsBloom),
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.PrevRandao),
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlockNumber),
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasLimit),
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasUsed),
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.Timestamp),
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ExtraData),
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BaseFeePerGas),
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.BlockHash),
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
},
{
expected: "2",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.DataGasUsed),
name: "ExecPayloadResponse.ExecutionPayload.DataGasUsed",
},
{
expected: "3",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessDataGas),
name: "ExecPayloadResponse.ExecutionPayload.ExcessDataGas",
},
}
for _, c := range cases {
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
}
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
require.Equal(t, txHash, hexutil.Encode(epr.Data.ExecutionPayload.Transactions[0]))
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Withdrawals))
w := epr.Data.ExecutionPayload.Withdrawals[0]
assert.Equal(t, uint64(1), w.Index.Uint64())
assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
assert.Equal(t, uint64(1), w.Amount.Uint64())
assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.DataGasUsed))
assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessDataGas))
}
func TestExecutionPayloadResponseToProto(t *testing.T) {
hr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
@@ -984,85 +796,6 @@ func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
}
func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
hr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), hr))
p, blobsBundle, err := hr.ToProto()
require.NoError(t, err)
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
require.NoError(t, err)
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err)
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
require.NoError(t, err)
txList := [][]byte{tx}
address, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943")
require.NoError(t, err)
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
expected := &v1.ExecutionPayloadDeneb{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
PrevRandao: prevRandao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: extraData,
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: blockHash,
Transactions: txList,
Withdrawals: []*v1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: address,
Amount: 1,
},
},
DataGasUsed: 2,
ExcessDataGas: 3,
}
require.DeepEqual(t, expected, p)
commitment, err := hexutil.Decode("0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
require.NoError(t, err)
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
require.NoError(t, err)
expectedBlobs := &v1.BlobsBundle{
KzgCommitments: [][]byte{
commitment,
},
Proofs: [][]byte{
proof,
},
Blobs: [][]byte{
make([]byte, fieldparams.BlobLength),
},
}
require.DeepEqual(t, blobsBundle, expectedBlobs)
}
func pbEth1Data() *eth.Eth1Data {
return &eth.Eth1Data{
DepositRoot: make([]byte, 32),
@@ -1293,30 +1026,6 @@ func pbExecutionPayloadHeaderCapella(t *testing.T) *v1.ExecutionPayloadHeaderCap
}
}
func pbExecutionPayloadHeaderDeneb(t *testing.T) *v1.ExecutionPayloadHeaderDeneb {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
return &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DataGasUsed: 1,
ExcessDataGas: 1,
}
}
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeader{
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
@@ -1337,16 +1046,6 @@ func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
require.Equal(t, expected, string(b))
}
func TestExecutionPayloadHeaderDeneb_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeaderDeneb{
ExecutionPayloadHeaderDeneb: pbExecutionPayloadHeaderDeneb(t),
}
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","data_gas_used":"1","excess_data_gas":"1"}`
require.Equal(t, expected, string(b))
}
var testBuilderBid = `{
"version":"bellatrix",
"data":{

View File

@@ -340,13 +340,7 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
}
s.headLock.RLock()
headRoot := s.head.root
headSlot := s.head.slot
headOptimistic := s.head.optimistic
s.headLock.RUnlock()
	// We trust the head package for recent head slots; otherwise we fall back to forkchoice.
if headSlot+2 >= s.CurrentSlot() {
return headOptimistic, nil
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
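For context, the hunk above removes a short-circuit in IsOptimistic: when the cached head is at most two slots behind the current slot, its stored optimistic flag is returned directly instead of consulting forkchoice. The following is a minimal, runnable sketch of that check under assumed names; it is illustrative only and not the actual Service API.
package main

import "fmt"

// isOptimisticRecent mirrors the short-circuit removed in the hunk above:
// trust the cached head's optimistic flag when the head is at most two slots
// behind the wall clock; otherwise report that the caller should consult the
// forkchoice store instead. The two-slot bound comes from the removed lines.
func isOptimisticRecent(headSlot, currentSlot uint64, headOptimistic bool) (optimistic, useForkchoice bool) {
	if headSlot+2 >= currentSlot {
		return headOptimistic, false
	}
	return false, true
}

func main() {
	opt, fallback := isOptimisticRecent(10, 11, true)
	fmt.Println(opt, fallback) // true false: the head is recent, so its cached flag is trusted
}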

View File

@@ -422,12 +422,6 @@ func TestService_IsOptimistic(t *testing.T) {
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, primitives.Slot(0), c.CurrentSlot())
require.Equal(t, false, opt)
c.SetGenesisTime(time.Now().Add(-time.Second * time.Duration(4*params.BeaconConfig().SecondsPerSlot)))
opt, err = c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}

View File

@@ -207,18 +207,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
var lastValidHash []byte
if blk.Version() >= version.Deneb {
_, err = blk.Block().Body().BlobKzgCommitments()
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
}
// TODO: Convert kzg commitment to version hashes and feed to below
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{})
} else {
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{} /*empty version hashes before Deneb*/)
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
switch err {
case nil:
newPayloadValidNodeCount.Inc()
@@ -316,7 +305,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella, version.Deneb:
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

View File

@@ -827,42 +827,6 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
require.Equal(t, 0, len(a))
}
func Test_GetPayloadAttributeDeneb(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := primitives.ValidatorIndex(1)
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())

View File

@@ -47,11 +47,9 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot primitives.Slot // the head block slot number
optimistic bool // optimistic status when saved head
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// This saves head info to the local service cache, it also saves the
@@ -96,10 +94,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
log.WithError(err).Error("could not check if node is optimistically synced")
}
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
@@ -131,6 +125,10 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
@@ -152,14 +150,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
}
// Cache the new head info.
newHead := &head{
root: newHeadRoot,
block: headBlock,
state: headState,
optimistic: isOptimistic,
slot: headBlock.Block().Slot(),
}
if err := s.setHead(newHead); err != nil {
if err := s.setHead(newHeadRoot, headBlock, headState); err != nil {
return errors.Wrap(err, "could not set head")
}
@@ -204,22 +195,20 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
return nil
}
// This sets the head view object, which is used to track the head slot, root, block, state, and optimistic status
func (s *Service) setHead(newHead *head) error {
// This sets the head view object, which is used to track the head slot, root, block, and state.
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block and state.
bCp, err := newHead.block.Copy()
bCp, err := block.Copy()
if err != nil {
return err
}
s.head = &head{
root: newHead.root,
block: bCp,
state: newHead.state.Copy(),
optimistic: newHead.optimistic,
slot: newHead.slot,
root: root,
block: bCp,
state: state.Copy(),
}
return nil
}

View File

@@ -145,7 +145,8 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
@@ -185,6 +186,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
}()
}
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
@@ -255,6 +257,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
go func() {
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
@@ -264,7 +270,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isValidPayload,
ExecutionOptimistic: isOptimistic,
},
})
@@ -522,6 +528,23 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return nil
}
// This feeds the block into the fork choice store, allowing the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
}
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// This feeds the attestations included in the block into the fork choice store, allowing the fork choice store
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {

View File

@@ -26,7 +26,6 @@ type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -151,11 +150,6 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
return s.hasBlockInInitSyncOrDB(ctx, root)
}
// RecentBlockSlot returns the block slot from the fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
return s.cfg.ForkChoiceStore.Slot(root)
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.cfg.ForkChoiceStore.Lock()

View File

@@ -308,13 +308,7 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
if err := s.setHead(&head{
finalizedRoot,
finalizedBlock,
finalizedState,
finalizedBlock.Block().Slot(),
false,
}); err != nil {
if err := s.setHead(finalizedRoot, finalizedBlock, finalizedState); err != nil {
return errors.Wrap(err, "could not set head")
}
@@ -446,13 +440,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
if err := s.setHead(&head{
genesisBlkRoot,
genesisBlk,
genesisState,
genesisBlk.Block().Slot(),
false,
}); err != nil {
if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil {
log.WithError(err).Fatal("Could not set head")
}
return nil

View File

@@ -377,7 +377,9 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -451,7 +453,9 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -52,11 +52,6 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
return nil
}
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

View File

@@ -69,7 +69,6 @@ type ChainService struct {
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -390,11 +389,6 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
return s.InitSyncBlockRoots[rt]
}
// RecentBlockSlot mocks the same method in the chain service.
func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
return s.BlockSlot, nil
}
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}

View File

@@ -15,12 +15,10 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -2,7 +2,6 @@ package builder
import (
"context"
"fmt"
"reflect"
"time"
@@ -11,12 +10,10 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -27,7 +24,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -90,7 +87,7 @@ func (s *Service) Stop() error {
}
// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
@@ -98,13 +95,10 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return nil, nil, ErrNoBuilder
}
if uint64(len(blobs)) > fieldparams.MaxBlobsPerBlock {
return nil, nil, fmt.Errorf("blob count %d beyond max limit of %d", len(blobs), fieldparams.MaxBlobsPerBlock)
return nil, ErrNoBuilder
}
return s.c.SubmitBlindedBlock(ctx, b, blobs)
return s.c.SubmitBlindedBlock(ctx, b)
}
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.

View File

@@ -62,7 +62,7 @@ func Test_BuilderMethodsWithouClient(t *testing.T) {
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
_, _, err = s.SubmitBlindedBlock(context.Background(), nil, nil)
_, err = s.SubmitBlindedBlock(context.Background(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
err = s.RegisterValidator(context.Background(), nil)

View File

@@ -16,7 +16,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -27,12 +26,9 @@ type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
@@ -45,36 +41,23 @@ func (s *MockBuilderService) Configured() bool {
}
// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
switch b.Version() {
case version.Bellatrix:
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if s.Payload != nil {
w, err := blocks.WrappedExecutionPayload(s.Payload)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap payload")
return nil, errors.Wrap(err, "could not wrap payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
default:
return nil, nil, errors.New("unknown block version for mocking")
return w, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, s.ErrSubmitBlindedBlock
}
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}

View File

@@ -13,15 +13,6 @@ import (
"go.opencensus.io/trace"
)
// AttDelta contains rewards and penalties for a single attestation.
type AttDelta struct {
HeadReward uint64
SourceReward uint64
SourcePenalty uint64
TargetReward uint64
TargetPenalty uint64
}
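The hunks that follow replace this per-attestation struct with separate per-validator rewards and penalties slices; the aggregation the callers and tests perform is just a sum over the struct's fields. A small, runnable sketch of that equivalence, with the AttDelta type copied from the lines above (the helper name is illustrative):
package main

import "fmt"

// AttDelta is copied from the struct above.
type AttDelta struct {
	HeadReward, SourceReward, SourcePenalty, TargetReward, TargetPenalty uint64
}

// flatten collapses one delta into the (reward, penalty) pair that the
// slice-returning AttestationsDelta signature reports per validator,
// matching the summation performed by the tests further down in this diff.
func flatten(d AttDelta) (reward, penalty uint64) {
	return d.HeadReward + d.SourceReward + d.TargetReward, d.SourcePenalty + d.TargetPenalty
}

func main() {
	r, p := flatten(AttDelta{HeadReward: 3, SourceReward: 5, TargetReward: 7, TargetPenalty: 2})
	fmt.Println(r, p) // 15 2
}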
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
@@ -235,7 +226,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
return beaconState, errors.New("validator registries not the same length as state's validator registries")
}
attDeltas, err := AttestationsDelta(beaconState, bal, vals)
attsRewards, attsPenalties, err := AttestationsDelta(beaconState, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
@@ -246,12 +237,11 @@ func ProcessRewardsAndPenaltiesPrecompute(
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
delta := attDeltas[i]
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], delta.HeadReward+delta.SourceReward+delta.TargetReward)
balances[i], err = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
if err != nil {
return nil, err
}
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty)
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
vals[i].AfterEpochTransitionBalance = balances[i]
}
@@ -265,8 +255,10 @@ func ProcessRewardsAndPenaltiesPrecompute(
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
// voting records.
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) ([]*AttDelta, error) {
attDeltas := make([]*AttDelta, len(vals))
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := beaconState.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
cfg := params.BeaconConfig()
prevEpoch := time.PrevEpoch(beaconState)
@@ -280,29 +272,29 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
bias := cfg.InactivityScoreBias
inactivityPenaltyQuotient, err := beaconState.InactivityPenaltyQuotient()
if err != nil {
return nil, err
return nil, nil, err
}
inactivityDenominator := bias * inactivityPenaltyQuotient
for i, v := range vals {
attDeltas[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
if err != nil {
return nil, err
return nil, nil, err
}
}
return attDeltas, nil
return rewards, penalties, nil
}
func attestationDelta(
bal *precompute.Balance,
val *precompute.Validator,
baseRewardMultiplier, inactivityDenominator uint64,
inactivityLeak bool) (*AttDelta, error) {
inactivityLeak bool) (reward, penalty uint64, err error) {
eligible := val.IsActivePrevEpoch || (val.IsSlashed && !val.IsWithdrawableCurrentEpoch)
// Per spec `ActiveCurrentEpoch` can't be 0 to process attestation delta.
if !eligible || bal.ActiveCurrentEpoch == 0 {
return &AttDelta{}, nil
return 0, 0, nil
}
cfg := params.BeaconConfig()
@@ -315,32 +307,32 @@ func attestationDelta(
srcWeight := cfg.TimelySourceWeight
tgtWeight := cfg.TimelyTargetWeight
headWeight := cfg.TimelyHeadWeight
attDelta := &AttDelta{}
reward, penalty = uint64(0), uint64(0)
// Process source reward / penalty
if val.IsPrevEpochSourceAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * srcWeight * (bal.PrevEpochAttested / increment)
attDelta.SourceReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.SourcePenalty += baseReward * srcWeight / weightDenominator
penalty += baseReward * srcWeight / weightDenominator
}
// Process target reward / penalty
if val.IsPrevEpochTargetAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * tgtWeight * (bal.PrevEpochTargetAttested / increment)
attDelta.TargetReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
} else {
attDelta.TargetPenalty += baseReward * tgtWeight / weightDenominator
penalty += baseReward * tgtWeight / weightDenominator
}
// Process head reward / penalty
if val.IsPrevEpochHeadAttester && !val.IsSlashed {
if !inactivityLeak {
n := baseReward * headWeight * (bal.PrevEpochHeadAttested / increment)
attDelta.HeadReward += n / (activeIncrement * weightDenominator)
reward += n / (activeIncrement * weightDenominator)
}
}
@@ -349,10 +341,10 @@ func attestationDelta(
if !val.IsPrevEpochTargetAttester || val.IsSlashed {
n, err := math.Mul64(effectiveBalance, val.InactivityScore)
if err != nil {
return &AttDelta{}, err
return 0, 0, err
}
attDelta.TargetPenalty += n / inactivityDenominator
penalty += n / inactivityDenominator
}
return attDelta, nil
return reward, penalty, nil
}

View File

@@ -213,16 +213,9 @@ func TestAttestationsDelta(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -251,16 +244,9 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
@@ -299,15 +285,8 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
}
wanted := make([]uint64, s.NumValidators())
deltas, err := AttestationsDelta(s, balance, validators)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
rewards := make([]uint64, len(deltas))
penalties := make([]uint64, len(deltas))
for i, d := range deltas {
rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
penalties[i] = d.SourcePenalty + d.TargetPenalty
}
for i := range rewards {
wanted[i] += rewards[i]
}

View File

@@ -145,40 +145,6 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateDeneb:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyDeneb{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}

View File

@@ -1,30 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["upgrade.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["upgrade_test.go"],
deps = [
":go_default_library",
"//beacon-chain/core/time:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -1,115 +0,0 @@
package deneb
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// UpgradeToDeneb upgrades the input generic state and returns a Deneb-version state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := state.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := state.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := state.HistoricalSummaries()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: [][]byte{},
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessDataGas: 0,
DataGasUsed: 0,
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
}
return state_native.InitializeFromProtoUnsafeDeneb(s)
}

View File

@@ -1,94 +0,0 @@
package deneb_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestUpgradeToDeneb(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
mSt, err := deneb.UpgradeToDeneb(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
numValidators := mSt.NumValidators()
p, err := mSt.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
p, err = mSt.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
s, err := mSt.InactivityScores()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
f := mSt.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: time.CurrentEpoch(st),
}, f)
csc, err := mSt.CurrentSyncCommittee()
require.NoError(t, err)
psc, err := preForkState.CurrentSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, csc)
nsc, err := mSt.NextSyncCommittee()
require.NoError(t, err)
psc, err = preForkState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, nsc)
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
txRoot, err := prevHeader.TransactionsRoot()
require.NoError(t, err)
wdRoot, err := prevHeader.WithdrawalsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),
ReceiptsRoot: prevHeader.ReceiptsRoot(),
LogsBloom: prevHeader.LogsBloom(),
PrevRandao: prevHeader.PrevRandao(),
BlockNumber: prevHeader.BlockNumber(),
GasLimit: prevHeader.GasLimit(),
GasUsed: prevHeader.GasUsed(),
Timestamp: prevHeader.Timestamp(),
BaseFeePerGas: prevHeader.BaseFeePerGas(),
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
}
require.DeepEqual(t, wanted, protoHeader)
}

View File

@@ -81,15 +81,6 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
return epochStart && capellaEpoch
}
// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
func CanUpgradeToDeneb(slot primitives.Slot) bool {
epochStart := slots.IsEpochStart(slot)
DenebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
return epochStart && DenebEpoch
}
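A worked example of the check above, assuming 32 slots per epoch and the DenebForkEpoch of 5 used by the test below: only the first slot of epoch 5, i.e. slot 160, satisfies both conditions. The sketch restates the helper with plain integers so the arithmetic is visible; it is not the params-based implementation.
package main

import "fmt"

// canUpgradeToDeneb restates the helper above with explicit parameters;
// slotsPerEpoch and denebForkEpoch are assumed values (32 and 5).
func canUpgradeToDeneb(slot, slotsPerEpoch, denebForkEpoch uint64) bool {
	epochStart := slot%slotsPerEpoch == 0
	denebEpoch := slot/slotsPerEpoch == denebForkEpoch
	return epochStart && denebEpoch
}

func main() {
	fmt.Println(canUpgradeToDeneb(1, 32, 5))   // false: not an epoch start
	fmt.Println(canUpgradeToDeneb(32, 32, 5))  // false: epoch 1, not the Deneb fork epoch
	fmt.Println(canUpgradeToDeneb(160, 32, 5)) // true: first slot of epoch 5
}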
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

View File

@@ -298,38 +298,3 @@ func TestCanUpgradeToCapella(t *testing.T) {
})
}
}
func TestCanUpgradeToDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.DenebForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not deneb epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "deneb epoch",
slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToDeneb(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToDeneb() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -18,7 +18,6 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/capella:go_default_library",
"//beacon-chain/core/deneb:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/capella"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
e "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/execution"
@@ -270,10 +269,28 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
return nil, errors.Wrap(err, "failed to increment state slot")
}
state, err = UpgradeState(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "failed to upgrade state")
if time.CanUpgradeToAltair(state.Slot()) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToBellatrix(state.Slot()) {
state, err = execution.UpgradeToBellatrix(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToCapella(state.Slot()) {
state, err = capella.UpgradeToCapella(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
}
@@ -284,45 +301,6 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
return state, nil
}
// UpgradeState upgrades the state to the next version if possible.
func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.UpgradeState")
defer span.End()
var err error
if time.CanUpgradeToAltair(state.Slot()) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToBellatrix(state.Slot()) {
state, err = execution.UpgradeToBellatrix(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToCapella(state.Slot()) {
state, err = capella.UpgradeToCapella(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToDeneb(state.Slot()) {
state, err = deneb.UpgradeToDeneb(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
return state, nil
}
// VerifyOperationLengths verifies that block operation lengths are valid.
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
if err := blocks.BeaconBlockIsNil(b); err != nil {

View File

@@ -256,7 +256,7 @@ func ProcessOperationsNoVerifyAttsSigs(
if err != nil {
return nil, err
}
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
case version.Altair, version.Bellatrix, version.Capella:
state, err = altairOperations(ctx, state, signedBeaconBlock)
if err != nil {
return nil, err

View File

@@ -629,20 +629,6 @@ func TestProcessSlots_ThroughBellatrixEpoch(t *testing.T) {
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlots_ThroughDenebEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf.DenebForkEpoch = 5
params.OverrideBeaconConfig(conf)
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
require.NoError(t, err)
require.Equal(t, version.Deneb, st.Version())
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
r := []byte{'a'}

View File

@@ -54,11 +54,6 @@ type ReadOnlyDatabase interface {
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// Blob operations.
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -94,10 +89,6 @@ type NoHeadAccessDatabase interface {
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
// Blob operations.
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"archived_point.go",
"backup.go",
"blob.go",
"blocks.go",
"checkpoint.go",
"deposit_contract.go",
@@ -39,7 +38,6 @@ go_library(
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -76,7 +74,6 @@ go_test(
srcs = [
"archived_point_test.go",
"backup_test.go",
"blob_test.go",
"blocks_test.go",
"checkpoint_test.go",
"deposit_contract_test.go",
@@ -113,7 +110,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",
"//testing/assertions:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

View File

@@ -1,253 +0,0 @@
package kv
import (
"bytes"
"context"
"fmt"
"sort"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// SaveBlobSidecar saves the blob sidecars for a given block in the blobs bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
defer span.End()
sortSideCars(scs)
if err := s.verifySideCars(scs); err != nil {
return err
}
slot := scs[0].Slot
return s.db.Update(func(tx *bolt.Tx) error {
encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
if err != nil {
return err
}
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
newKey := blobSidecarKey(scs[0])
rotatingBufferPrefix := newKey[0:8]
var replacingKey []byte
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
if len(k) != 0 {
replacingKey = k
oldSlotBytes := replacingKey[8:16]
oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
if oldSlot >= slot {
return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, oldSlot)
}
break
}
}
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
// store the blob by key and exit early.
if len(replacingKey) != 0 {
if err := bkt.Delete(replacingKey); err != nil {
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
}
}
return bkt.Put(newKey, encodedBlobSidecar)
})
}
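The doc comment above describes a rotating-buffer key layout: 8 bytes of slot % maxSlots, then 8 bytes of the raw slot, then the block root, so a newer slot that maps to an occupied buffer position replaces the older entry. A compact, runnable sketch of that key construction follows; the big-endian encoding and the 131072-slot bound (4096 epochs of 32 slots, the figure quoted in the comments below) are assumptions taken from the surrounding code, not the Store implementation.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// maxSlotsToPersistBlobs assumes mainnet values: 4096 epochs * 32 slots,
// the 131072-entry bound mentioned in the comments below.
const maxSlotsToPersistBlobs = 131072

// blobKey sketches the key layout described above:
// bytes(slot % maxSlots) ++ bytes(slot) ++ blockRoot.
func blobKey(slot uint64, blockRoot [32]byte) []byte {
	key := make([]byte, 16, 16+32)
	binary.BigEndian.PutUint64(key[0:8], slot%maxSlotsToPersistBlobs)
	binary.BigEndian.PutUint64(key[8:16], slot)
	return append(key, blockRoot[:]...)
}

func main() {
	// Slots that are exactly maxSlotsToPersistBlobs apart share the same 8-byte
	// rotating prefix, which is what lets a newer write evict the older entry.
	a := blobKey(5, [32]byte{0xaa})
	b := blobKey(5+maxSlotsToPersistBlobs, [32]byte{0xbb})
	fmt.Println(bytes.Equal(a[:8], b[:8])) // true
}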
// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index.
// It also ensures that indices are sequential, start at 0, and number no more than MAX_BLOBS_PER_BLOCK.
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
if len(scs) == 0 {
return errors.New("nil or empty blob sidecars")
}
if uint64(len(scs)) > fieldparams.MaxBlobsPerBlock {
return fmt.Errorf("too many sidecars: %d > %d", len(scs), fieldparams.MaxBlobsPerBlock)
}
sl := scs[0].Slot
pr := scs[0].BlockParentRoot
r := scs[0].BlockRoot
p := scs[0].ProposerIndex
for i, sc := range scs {
if sc.Slot != sl {
return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
}
if !bytes.Equal(sc.BlockParentRoot, pr) {
return fmt.Errorf("sidecar parent root mismatch: %x != %x", sc.BlockParentRoot, pr)
}
if !bytes.Equal(sc.BlockRoot, r) {
return fmt.Errorf("sidecar root mismatch: %x != %x", sc.BlockRoot, r)
}
if sc.ProposerIndex != p {
return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
}
if sc.Index != uint64(i) {
return fmt.Errorf("sidecar index mismatch: %d != %d", sc.Index, i)
}
}
return nil
}
// sortSideCars sorts the sidecars by their index.
func sortSideCars(scs []*ethpb.BlobSidecar) {
sort.Slice(scs, func(i, j int) bool {
return scs[i].Index < scs[j].Index
})
}
// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
defer span.End()
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
if bytes.HasSuffix(k, root[:]) {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}
return filterForIndices(sc, indices...)
}
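// A minimal read-path sketch (illustration only; assumes the same store and a 32-byte root whose sidecars were
// previously saved):
//
//	all, err := store.BlobSidecarsByRoot(ctx, root)        // every sidecar stored for the block
//	some, err := store.BlobSidecarsByRoot(ctx, root, 0, 3) // only indices 0 and 3
//	// Requesting an index >= the number of stored sidecars returns an error wrapping ErrNotFound.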
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
if len(indices) == 0 {
return sc.Sidecars, nil
}
// This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
// in ascending index order, e.g. 0..3, without gaps. This allows us to assume the indices argument
// maps 1:1 with indices in the BlobSidecars storage object.
maxIdx := uint64(len(sc.Sidecars)) - 1
sidecars := make([]*ethpb.BlobSidecar, len(indices))
for i, idx := range indices {
if idx > maxIdx {
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
}
sidecars[i] = sc.Sidecars[idx]
}
return sidecars, nil
}
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries, one per slot in the retention window; that is the most a node will keep before rotating entries out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
defer span.End()
var enc []byte
sk := slotKey(slot)
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, v = c.Next() {
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
if slotInKey == slot {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}
return filterForIndices(sc, indices...)
}
// DeleteBlobSidecar deletes all blob sidecars stored under the given beacon block root.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
if err := bkt.Delete(k); err != nil {
return err
}
}
}
return nil
})
}
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
key := slotKey(blob.Slot)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)
return key
}
func slotKey(slot types.Slot) []byte {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
}
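// Worked example (a sketch using the defaults visible in this package: MinEpochsForBlobsSidecarsRequest = 4096,
// SlotsPerEpoch = 32, so the rotating buffer spans 4096*32 = 131072 slots). For a sidecar at slot 131073 the key
// is built as:
//
//	slotKey(131073)                        // big-endian bytes of 131073 % 131072 = 1 (8 bytes)
//	bytesutil.SlotToBytesBigEndian(131073) // 8 bytes carrying the actual slot
//	blob.BlockRoot                         // 32 bytes
//
// yielding a 48-byte key whose 8-byte prefix collides with, and therefore evicts, whatever older entry
// previously occupied rotating position 1.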
// checkEpochsForBlobSidecarsRequestBucket persists the configured MinEpochsForBlobsSidecarsRequest value on
// first use and returns an error if a previously persisted value no longer matches the current config.
func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(epochsForBlobSidecarsRequestBucket)
k := []byte("epoch-key")
v := b.Get(k)
if v == nil {
if err := b.Put(k, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
return err
}
return nil
}
e := bytesutil.BytesToUint64BigEndian(v)
if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
}
return nil
}); err != nil {
return err
}
return nil
}
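// Behavioural note (illustration, not additional API): the persisted value pins the blob retention period for
// the lifetime of the database. For example, a DB created with MinEpochsForBlobsSidecarsRequest = 4096 that is
// later opened with the config overridden to 42069 fails this check with
// "epochs for blobs request value in DB 4096 does not match config value 42069".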

View File

@@ -1,317 +0,0 @@
package kv
import (
"context"
"crypto/rand"
"fmt"
"testing"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
"github.com/prysmaticlabs/prysm/v4/testing/require"
bolt "go.etcd.io/bbolt"
)
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
if len(expect) != len(got) {
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
}
for i := 0; i < len(expect); i++ {
es := expect[i]
gs := got[i]
var e string
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
if e != "" {
return errors.New(e)
}
}
return nil
}
func TestStore_BlobSidecars(t *testing.T) {
ctx := context.Background()
t.Run("empty", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 0)
require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
})
t.Run("empty by root", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("empty by slot", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsBySlot(ctx, 1)
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by root (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by slot (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("delete works", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("saving a blob with older slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
})
t.Run("saving a new blob for rotation", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range newScs {
sc.Slot = sc.Slot + newRetentionSlot
}
require.NoError(t, db.SaveBlobSidecar(ctx, newScs))
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(newScs, got))
})
}
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
blobSidecars := make([]*ethpb.BlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateBlobSidecar(t, i)
}
return blobSidecars
}
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
kzgCommitment := make([]byte, 48)
_, err = rand.Read(kzgCommitment)
require.NoError(t, err)
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)
return &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
Index: index,
Slot: 100,
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
ProposerIndex: 101,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
}
}
func TestStore_verifySideCars(t *testing.T) {
s := setupDB(t)
tests := []struct {
name string
scs []*ethpb.BlobSidecar
error string
}{
{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), error: "too many sidecars: 7 > 6"},
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
{name: "invalid side index", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 0}}, error: "sidecar index mismatch: 0 != 1"},
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := s.verifySideCars(tt.scs)
if tt.error != "" {
require.Equal(t, tt.error, err.Error())
} else {
require.NoError(t, err)
}
})
}
}
func TestStore_sortSidecars(t *testing.T) {
scs := []*ethpb.BlobSidecar{
{Index: 6},
{Index: 4},
{Index: 2},
{Index: 1},
{Index: 3},
{Index: 5},
{},
}
sortSideCars(scs)
for i := 0; i < len(scs)-1; i++ {
require.Equal(t, uint64(i), scs[i].Index)
}
}
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
s := setupDB(b)
ctx := context.Background()
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
}))
err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
for i := 1; i < 131071; i++ {
r := make([]byte, 32)
_, err := rand.Read(r)
require.NoError(b, err)
scs := []*ethpb.BlobSidecar{
{BlockRoot: r, Slot: primitives.Slot(i)},
}
k := blobSidecarKey(scs[0])
encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
require.NoError(b, err)
require.NoError(b, bkt.Put(k, encodedBlobSidecar))
}
return nil
})
require.NoError(b, err)
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'})
require.NoError(b, err)
}
}
func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
dbStore := setupDB(t)
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check
nConfig := params.BeaconNetworkConfig()
nConfig.MinEpochsForBlobsSidecarsRequest = 42069
params.OverrideBeaconNetworkConfig(nConfig)
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
}

View File

@@ -818,16 +818,6 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
}
case hasDenebKey(enc):
rawBlock = &ethpb.SignedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Deneb block")
}
case hasDenebBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -864,8 +854,6 @@ func marshalBlockFull(
return nil, err
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
@@ -900,8 +888,6 @@ func marshalBlockBlinded(
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:

View File

@@ -90,40 +90,6 @@ var blockTests = []struct {
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "deneb",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockDeneb()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.BlobKzgCommitments = [][]byte{
bytesutil.PadTo([]byte{0x01}, 48),
bytesutil.PadTo([]byte{0x02}, 48),
bytesutil.PadTo([]byte{0x03}, 48),
bytesutil.PadTo([]byte{0x04}, 48),
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "deneb blind",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBlindedBeaconBlockDeneb()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.BlobKzgCommitments = [][]byte{
bytesutil.PadTo([]byte{0x05}, 48),
bytesutil.PadTo([]byte{0x06}, 48),
bytesutil.PadTo([]byte{0x07}, 48),
bytesutil.PadTo([]byte{0x08}, 48),
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
@@ -393,10 +359,6 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
if _, err := blk.PbDenebBlock(); err == nil {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
retrievedPb, err := retrievedBlock.Proto()
@@ -620,10 +582,6 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
bPb, err := b.Proto()
@@ -646,10 +604,6 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
if _, err := block2.PbDenebBlock(); err == nil {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
wanted2Pb, err := wanted2.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -672,10 +626,6 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block3.PbDenebBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -716,10 +666,6 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
bPb, err := b.Proto()
@@ -741,10 +687,6 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
if _, err := genesisBlock.PbDenebBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -766,10 +708,6 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
if _, err := genesisBlock.PbDenebBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -870,10 +808,6 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
if _, err := b1.PbDenebBlock(); err == nil {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
retrieved0Pb, err := retrievedBlocks[0].Proto()
require.NoError(t, err)
wantedPb, err := wanted.Proto()
@@ -894,10 +828,6 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
if _, err := b2.PbDenebBlock(); err == nil {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
retrieved0Pb, err = retrievedBlocks[0].Proto()
require.NoError(t, err)
wantedPb, err = wanted.Proto()
@@ -912,10 +842,6 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
if _, err := b3.PbDenebBlock(); err == nil {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
retrieved1Pb, err := retrievedBlocks[1].Proto()
require.NoError(t, err)
wantedPb, err = wanted.Proto()

View File

@@ -37,17 +37,3 @@ func hasCapellaBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}
func hasDenebKey(enc []byte) bool {
if len(denebKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebKey)], denebKey)
}
func hasDenebBlindKey(enc []byte) bool {
if len(denebBlindKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}

View File

@@ -129,9 +129,6 @@ var Buckets = [][]byte{
feeRecipientBucket,
registrationBucket,
blobsBucket,
epochsForBlobSidecarsRequestBucket,
}
// NewKVStore initializes a new boltDB key-value store at the directory
@@ -202,11 +199,6 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
if err := kv.setupBlockStorageType(ctx); err != nil {
return nil, err
}
if err := checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
}
return kv, nil
}

View File

@@ -46,7 +46,6 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
blobsBucket = []byte("blobs")
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -56,9 +55,6 @@ var (
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
denebKey = []byte("deneb")
denebBlindKey = []byte("blind-deneb")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -74,7 +70,4 @@ var (
// Migrations
migrationsBucket = []byte("migrations")
// Stores how long to keep the blob sidecars for.
epochsForBlobSidecarsRequestBucket = []byte("epochs-for-blob-sidecars-request")
)

View File

@@ -229,28 +229,34 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
pbState, err := getPhase0PbState(rawType)
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState)
if err != nil {
return err
}
pbState.Validators = valEntries
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateAltair:
pbState, err := getAltairPbState(rawType)
pbState, err := statenative.ProtobufBeaconStateAltair(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -266,10 +272,13 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateBellatrix:
pbState, err := getBellatrixPbState(rawType)
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -285,28 +294,12 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateCapella:
pbState, err := getCapellaPbState(rawType)
pbState, err := statenative.ProtobufBeaconStateCapella(rawType)
if err != nil {
return err
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateDeneb:
pbState, err := getDenebPbState(rawType)
if err != nil {
return err
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
@@ -330,61 +323,6 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}
func getPhase0PbState(rawState interface{}) (*ethpb.BeaconState, error) {
pbState, err := statenative.ProtobufBeaconStatePhase0(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getAltairPbState(rawState interface{}) (*ethpb.BeaconStateAltair, error) {
pbState, err := statenative.ProtobufBeaconStateAltair(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getBellatrixPbState(rawState interface{}) (*ethpb.BeaconStateBellatrix, error) {
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getCapellaPbState(rawState interface{}) (*ethpb.BeaconStateCapella, error) {
pbState, err := statenative.ProtobufBeaconStateCapella(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getDenebPbState(rawState interface{}) (*ethpb.BeaconStateDeneb, error) {
pbState, err := statenative.ProtobufBeaconStateDeneb(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
valBkt := tx.Bucket(stateValidatorsBucket)
for hashStr, validatorEntry := range validatorsEntries {
@@ -534,19 +472,6 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}
switch {
case hasDenebKey(enc):
protoState := &ethpb.BeaconStateDeneb{}
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
case hasCapellaKey(enc):
// Marshal state bytes to capella beacon state.
protoState := &ethpb.BeaconStateCapella{}
@@ -654,19 +579,6 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}

View File

@@ -900,100 +900,6 @@ func TestBellatrixState_CanDelete(t *testing.T) {
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestDenebState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())
savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestDenebState_CanDelete(t *testing.T) {
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
require.NoError(t, db.DeleteState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) {
db := setupDB(t)
// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
stateValidators := validators(10)
st, _ := util.DeterministicGenesisStateDeneb(t, 20)
require.NoError(t, st.SetSlot(100))
require.NoError(t, st.SetValidators(stateValidators))
ctx := context.Background()
require.NoError(t, db.SaveState(ctx, st, r))
assert.Equal(t, true, db.HasState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching")
// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
data := idxBkt.Get(r[:])
require.NotEqual(t, 0, len(data))
return nil
})
require.NoError(t, err)
// check if all the validator entries are still intact in the validator entry bucket.
err = db.db.Update(func(tx *bolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
// if any of the original validator entry is not present, then fail the test.
for _, val := range stateValidators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
data := valBkt.Get(hash[:])
require.NotNil(t, data)
require.NotEqual(t, 0, len(data))
}
return nil
})
require.NoError(t, err)
}
func BenchmarkState_CheckStateSaveTime_1(b *testing.B) { checkStateSaveTime(b, 1) }
func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b, 10) }

View File

@@ -48,6 +48,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/clientstats:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",

View File

@@ -23,6 +23,7 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -49,7 +50,6 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
NewPayloadMethodV2 = "engine_newPayloadV2"
NewPayloadMethodV3 = "engine_newPayloadV3"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -58,7 +58,6 @@ const (
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
GetPayloadMethodV2 = "engine_getPayloadV2"
GetPayloadMethodV3 = "engine_getPayloadV3"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
@@ -97,11 +96,11 @@ type ExecutionPayloadReconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
@@ -112,7 +111,7 @@ type EngineCaller interface {
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -144,15 +143,6 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes)
if err != nil {
return nil, handleRPCError(err)
}
default:
return nil, errors.New("unknown execution data type")
}
@@ -200,7 +190,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella, version.Deneb:
case version.Capella:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
@@ -230,8 +220,7 @@ func (s *Service) ForkchoiceUpdated(
}
// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
// It returns the execution data as well as the blobs bundle.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
defer span.End()
start := time.Now()
@@ -243,42 +232,23 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, err
}
return ed, result.BlobsBundle, nil
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
return nil, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, err
}
return ed, nil, nil
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value))
return blocks.WrappedExecutionPayloadCapella(result.Payload, math.WeiToGwei(v))
}
result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
return nil, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayload(result)
if err != nil {
return nil, nil, err
}
return ed, nil, nil
return blocks.WrappedExecutionPayload(result)
}
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
@@ -714,8 +684,7 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}
switch block.Version {
case version.Bellatrix:
if block.Version == version.Bellatrix {
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -732,51 +701,24 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
})
case version.Capella:
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
edg, err := header.ExcessDataGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessDataGas attribute from excution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
ExcessDataGas: edg,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}
// Handles errors received from the RPC server according to the specification.

View File

@@ -75,7 +75,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, _, err := srv.GetPayload(ctx, payloadId, 1)
resp, err := srv.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
resPb, err := resp.PbBellatrix()
require.NoError(t, err)
@@ -85,7 +85,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, _, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
resPb, err := resp.PbCapella()
require.NoError(t, err)
@@ -118,7 +118,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -129,7 +129,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -163,7 +163,6 @@ func TestClient_HTTP(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
params.OverrideBeaconConfig(cfg)
t.Run(GetPayloadMethod, func(t *testing.T) {
@@ -204,7 +203,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, _, err := client.GetPayload(ctx, payloadId, 1)
resp, err := client.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
pb, err := resp.PbBellatrix()
require.NoError(t, err)
@@ -248,7 +247,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, _, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
pb, err := resp.PbCapella()
require.NoError(t, err)
@@ -263,60 +262,6 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1236), v)
})
t.Run(GetPayloadMethodV3, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayloadDenebWithValue"].(*pb.GetPayloadV3ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Service{}
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, blobsBundle, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
g, err := resp.ExcessDataGas()
require.NoError(t, err)
require.DeepEqual(t, uint64(3), g)
g, err = resp.DataGasUsed()
require.NoError(t, err)
require.DeepEqual(t, uint64(2), g)
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, blobsBundle.KzgCommitments)
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, blobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, blobsBundle.Blobs)
})
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
@@ -467,7 +412,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -481,21 +426,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -509,7 +440,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -523,21 +454,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" SYNCING status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -551,7 +468,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -565,21 +482,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" INVALID_BLOCK_HASH status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -593,7 +496,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -607,21 +510,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -635,7 +524,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -1417,25 +1306,6 @@ func fixtures() map[string]interface{} {
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
}
executionPayloadFixtureDeneb := &pb.ExecutionPayloadDeneb{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
PrevRandao: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
DataGasUsed: 2,
ExcessDataGas: 3,
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
@@ -1456,34 +1326,6 @@ func fixtures() map[string]interface{} {
},
BlockValue: "0x11fffffffff",
}
dgu := hexutil.Uint64(2)
edg := hexutil.Uint64(3)
executionPayloadWithValueFixtureDeneb := &pb.GetPayloadV3ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
DataGasUsed: &dgu,
ExcessDataGas: &edg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleJSON{
Commitments: [][48]byte{bytesutil.ToBytes48([]byte("commitment1")), bytesutil.ToBytes48([]byte("commitment2"))},
Proofs: [][48]byte{bytesutil.ToBytes48([]byte("proof1")), bytesutil.ToBytes48([]byte("proof2"))},
Blobs: [][]byte{{'a'}, {'b'}},
},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
@@ -1579,9 +1421,7 @@ func fixtures() map[string]interface{} {
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
"ExecutionPayloadDeneb": executionPayloadFixtureDeneb,
"ExecutionPayloadCapellaWithValue": executionPayloadWithValueFixtureCapella,
"ExecutionPayloadDenebWithValue": executionPayloadWithValueFixtureDeneb,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
@@ -1656,126 +1496,6 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
}
}
func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderCapella
block *pb.ExecutionBlock
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: common.BytesToHash([]byte("bar")),
},
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: wantedHash,
},
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}
func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderDeneb
block *pb.ExecutionBlock
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: common.BytesToHash([]byte("bar")),
},
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: wantedHash,
},
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}
func TestHeaderByHash_NotFound(t *testing.T) {
srv := &Service{}
srv.rpcClient = RPCClientBad{}
@@ -2099,40 +1819,6 @@ func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
return service
}
func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadDeneb) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(payload)
require.NoError(t, err)
// We expect that the JSON-RPC request string contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": status,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
service := &Service{}
service.rpcClient = rpcClient
return service
}
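The newPayloadV2/V3 setup helpers above follow one pattern: start an httptest server, assert that the incoming JSON-RPC body carries the marshaled payload, and reply with a canned status. The standalone sketch below shows that pattern with a plain substring check and a hard-coded response; the method name and response shape are placeholders rather than Prysm's fixtures.
package main
import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)
func main() {
	// Test server: inspect the request body before answering, as the setup helpers above do.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		defer r.Body.Close()
		if !strings.Contains(string(body), "engine_newPayload") {
			http.Error(w, "unexpected request", http.StatusBadRequest)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"jsonrpc":"2.0","id":1,"result":{"status":"VALID"}}`)
	}))
	defer srv.Close()
	resp, err := http.Post(srv.URL, "application/json",
		strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"engine_newPayloadV3","params":[]}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}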
func TestCapella_PayloadBodiesByHash(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,

View File

@@ -23,7 +23,6 @@ go_library(
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",

View File

@@ -14,7 +14,6 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -26,7 +25,6 @@ type EngineClient struct {
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
ExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
@@ -41,11 +39,10 @@ type EngineClient struct {
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlockValue uint64
BlobsBundle *pb.BlobsBundle
}
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ [][32]byte) ([]byte, error) {
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData) ([]byte, error) {
return e.NewPayloadResp, e.ErrNewPayload
}
@@ -60,26 +57,15 @@ func (e *EngineClient) ForkchoiceUpdated(
}
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, err
}
return ed, e.BlobsBundle, nil
}
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, err
}
return ed, nil, nil
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, e.BlockValue)
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
return nil, nil, err
return nil, err
}
return p, nil, e.ErrGetPayload
return p, e.ErrGetPayload
}
// ExchangeTransitionConfiguration --

View File

@@ -39,7 +39,7 @@ func New() *ForkChoice {
b := make([]uint64, 0)
v := make([]Vote, 0)
return &ForkChoice{store: s, balances: b, votes: v}
return &ForkChoice{store: s, balances: b, votes: v, fcLock: new(fcLock)}
}
// NodeCount returns the current number of nodes in the Store.

View File

@@ -1,7 +1,11 @@
package doublylinkedtree
import (
"bytes"
"runtime/debug"
"runtime/pprof"
"sync"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
@@ -11,7 +15,7 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
sync.RWMutex
*fcLock
store *Store
votes []Vote // tracks individual validator's last vote.
balances []uint64 // tracks individual validator's balances last accounted in votes.
@@ -68,3 +72,52 @@ type Vote struct {
nextRoot [fieldparams.RootLength]byte // next voting root.
nextEpoch primitives.Epoch // epoch of next voting period.
}
type fcLock struct {
lk sync.RWMutex
t time.Time
currChan chan int
}
func (f *fcLock) Lock() {
f.lk.Lock()
f.t = time.Now()
f.currChan = make(chan int)
go func(t time.Time, c chan int) {
tim := time.NewTimer(3 * time.Second)
select {
case <-c:
tim.Stop()
case <-tim.C:
tim.Stop()
pfile := pprof.Lookup("goroutine")
bf := bytes.NewBuffer([]byte{})
err := pfile.WriteTo(bf, 1)
_ = err
log.Warnf("FC lock is taking longer than 3 seconds with the complete stack of %s", bf.String())
}
}(time.Now(), f.currChan)
}
func (f *fcLock) Unlock() {
t := time.Since(f.t)
f.t = time.Time{}
close(f.currChan)
f.lk.Unlock()
if t > time.Second {
log.Warnf("FC lock is taking longer than 1 second: %s with the complete stack of %s", t.String(), string(debug.Stack()))
}
}
func (f *fcLock) RLock() {
t := time.Now()
f.lk.RLock()
dt := time.Since(t)
if dt > time.Second {
log.Warnf("FC Rlock is taking longer than 1 second: %s with stack %s", dt.String(), string(debug.Stack()))
}
}
func (f *fcLock) RUnlock() {
f.lk.RUnlock()
}
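The fcLock type above instruments the fork choice mutex so that a lock held too long is reported together with a full goroutine dump. Below is a minimal standalone sketch of the same timer-plus-channel idea using only the standard library; the timedLock name is illustrative and not part of the Prysm code, while the 3-second threshold mirrors the one above.
package main
import (
	"bytes"
	"log"
	"runtime/pprof"
	"sync"
	"time"
)
// timedLock wraps a mutex and reports critical sections that run too long.
type timedLock struct {
	mu   sync.Mutex
	done chan struct{}
}
func (l *timedLock) Lock() {
	l.mu.Lock()
	l.done = make(chan struct{})
	go func(done chan struct{}) {
		t := time.NewTimer(3 * time.Second)
		defer t.Stop()
		select {
		case <-done: // released in time, nothing to report
		case <-t.C: // still held after 3s: dump all goroutine stacks
			var buf bytes.Buffer
			if p := pprof.Lookup("goroutine"); p != nil {
				_ = p.WriteTo(&buf, 1)
			}
			log.Printf("lock held longer than 3s:\n%s", buf.String())
		}
	}(l.done)
}
func (l *timedLock) Unlock() {
	close(l.done)
	l.mu.Unlock()
}
func main() {
	var l timedLock
	l.Lock()
	time.Sleep(4 * time.Second) // deliberately exceed the threshold to trigger the report
	l.Unlock()
}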

View File

@@ -158,7 +158,8 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
},
Action: func(cliCtx *cli.Context) error {
require.Equal(t, true, cliCtx.IsSet(cmd.BootstrapNode.Name))
//TODO: https://github.com/urfave/cli/issues/1197 right now does not set flag
require.Equal(t, false, cliCtx.IsSet(cmd.BootstrapNode.Name))
require.Equal(t, strings.Join([]string{"node1", "node2"}, ","),
strings.Join(cliCtx.StringSlice(cmd.BootstrapNode.Name), ","))

View File

@@ -230,8 +230,8 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
log.Debugln("Registering Determinstic Genesis Service")
if err := beacon.registerDeterminsticGenesisService(); err != nil {
return nil, err
}
@@ -924,7 +924,7 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
return b.services.RegisterService(g)
}
func (b *BeaconNode) registerDeterministicGenesisService() error {
func (b *BeaconNode) registerDeterminsticGenesisService() error {
genesisTime := b.cliCtx.Uint64(flags.InteropGenesisTimeFlag.Name)
genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)

View File

@@ -47,7 +47,6 @@ go_test(
deps = [
"//async:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",

View File

@@ -14,7 +14,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
@@ -40,8 +39,8 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -2,12 +2,9 @@ package kv
import (
"context"
"runtime"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -26,11 +23,21 @@ func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error
if err != nil {
return err
}
return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
// AggregateUnaggregatedAttestationsBySlotIndex aggregates the unaggregated attestations and saves
// newly aggregated attestations in the pool. Unaggregated attestations are filtered by slot and
// committee index.
func (c *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregateUnaggregatedAttestationsBySlotIndex")
defer span.End()
unaggregatedAtts := c.UnaggregatedAttestationsBySlotIndex(ctx, slot, committeeIndex)
return c.aggregateUnaggregatedAttestations(ctx, unaggregatedAtts)
}
func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAttestations")
defer span.End()
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(unaggregatedAtts))
@@ -45,32 +52,26 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// Aggregate unaggregated attestations from the pool and save them in the pool.
// Track the unaggregated attestations that could not be aggregated.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
if features.Get().AggregateParallel {
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
} else {
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
return err
}
leftOverUnaggregatedAtt[h] = true
}
}
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
@@ -87,58 +88,6 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
return nil
}
// aggregateParallel aggregates attestations in parallel for `atts`, saves the results in the pool,
// and returns the unaggregated attestations that could not be aggregated.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations.
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]*ethpb.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
var leftoverLock sync.Mutex
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
ch := make(chan []*ethpb.Attestation, n)
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
defer wg.Done()
for as := range ch {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
if err != nil {
log.WithError(err).Error("could not aggregate unaggregated attestations")
continue
}
if aggregated == nil {
log.Error("nil aggregated attestation")
continue
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
}
} else {
h, err := hashFn(aggregated)
if err != nil {
log.WithError(err).Error("could not hash attestation")
continue
}
leftoverLock.Lock()
leftOver[h] = true
leftoverLock.Unlock()
}
}
}()
}
for _, as := range atts {
ch <- as
}
close(ch)
wg.Wait()
return leftOver
}
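As the comment above notes, aggregateParallel fans the per-root attestation batches out to GOMAXPROCS workers over a buffered channel and records leftovers under a mutex. The generic sketch below keeps only that fan-out shape; fanOut and process are hypothetical stand-ins for the real aggregation step, not Prysm APIs.
package main
import (
	"fmt"
	"runtime"
	"sync"
)
// fanOut feeds each batch to one of GOMAXPROCS workers and collects the results
// under a mutex, mirroring the channel-plus-WaitGroup shape used above.
// Result order is nondeterministic because workers run concurrently.
func fanOut(batches [][]int, process func([]int) int) []int {
	n := runtime.GOMAXPROCS(0)
	in := make(chan []int, n)
	var (
		mu  sync.Mutex
		out []int
		wg  sync.WaitGroup
	)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for b := range in {
				r := process(b)
				mu.Lock()
				out = append(out, r)
				mu.Unlock()
			}
		}()
	}
	for _, b := range batches {
		in <- b
	}
	close(in)
	wg.Wait()
	return out
}
func main() {
	sum := func(b []int) int {
		s := 0
		for _, v := range b {
			s += v
		}
		return s
	}
	fmt.Println(fanOut([][]int{{1, 2}, {3, 4}, {5}}, sum))
}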
// SaveAggregatedAttestation saves an aggregated attestation in cache.
func (c *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
@@ -216,7 +165,7 @@ func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]*ethpb.Attestation, 0)

View File

@@ -9,7 +9,7 @@ import (
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -18,11 +18,6 @@ import (
)
func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
cache := NewAttCaches()
priv, err := bls.RandKey()
require.NoError(t, err)
@@ -44,6 +39,61 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(context.Background(), 2, 0)), "Did not aggregate correctly")
}
func TestKV_Aggregated_AggregateUnaggregatedAttestationsBySlotIndex(t *testing.T) {
cache := NewAttCaches()
genData := func(slot primitives.Slot, committeeIndex primitives.CommitteeIndex) *ethpb.AttestationData {
return util.HydrateAttestationData(&ethpb.AttestationData{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
genSign := func() []byte {
priv, err := bls.RandKey()
require.NoError(t, err)
return priv.Sign([]byte{'a'}).Marshal()
}
atts := []*ethpb.Attestation{
// The first slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 2), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(1, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(1, 3), Signature: genSign()},
// The second slot.
{AggregationBits: bitfield.Bitlist{0b1001}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1010}, Data: genData(2, 3), Signature: genSign()},
{AggregationBits: bitfield.Bitlist{0b1100}, Data: genData(2, 4), Signature: genSign()},
}
ctx := context.Background()
// Make sure that no error is produced if aggregation is requested on empty unaggregated list.
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Persist unaggregated attestations, and aggregate on per slot/committee index base.
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 1, 2))
require.NoError(t, cache.AggregateUnaggregatedAttestationsBySlotIndex(ctx, 2, 3))
// Committee attestations at a slot should be aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)), "Did not aggregate correctly")
// Committee attestations haven't been aggregated.
require.Equal(t, 2, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 3)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 1, 3)), "Did not aggregate correctly")
// Committee at a second slot is aggregated.
require.Equal(t, 0, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 3)))
require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 3)), "Did not aggregate correctly")
// The second committee at second slot is not aggregated.
require.Equal(t, 1, len(cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 4)))
require.Equal(t, 0, len(cache.AggregatedAttestationsBySlotIndex(ctx, 2, 4)), "Did not aggregate correctly")
}
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
tests := []struct {
name string

View File

@@ -15,6 +15,7 @@ import (
type Pool interface {
// For Aggregated attestations
AggregateUnaggregatedAttestations(ctx context.Context) error
AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) error
SaveAggregatedAttestation(att *ethpb.Attestation) error
SaveAggregatedAttestations(atts []*ethpb.Attestation) error
AggregatedAttestations() []*ethpb.Attestation

View File

@@ -7,7 +7,6 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -18,11 +17,6 @@ import (
)
func TestBatchAttestations_Multiple(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)

View File

@@ -202,67 +202,6 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
}
}
// BroadcastBlob broadcasts a blob to the p2p network. The message is assumed to be
// broadcast to the current fork and to the input subnet.
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
if blob == nil {
return errors.New("attempted to broadcast nil blob sidecar")
}
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
return nil
}
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()
wrappedSubIdx := subnet + blobSubnetLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
s.subnetLocker(wrappedSubIdx).RUnlock()
if !hasPeer {
blobSidecarCommitteeBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, blobSubnetToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
}
if ok {
blobSidecarCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
}
}
if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}
}
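BroadcastBlob above returns immediately and does the real work in a goroutine: take the per-subnet lock, check for a subnet peer, attempt discovery if none is found, then publish within roughly one slot. The sketch below captures that control flow with hypothetical hasPeer, findPeer and publish callbacks; it is not the p2p service's actual API.
package main
import (
	"context"
	"fmt"
	"sync"
	"time"
)
// broadcastWhenPeerAvailable returns right away and finishes the broadcast in
// the background, looking for a subnet peer under the lock first.
func broadcastWhenPeerAvailable(parent context.Context, mu *sync.RWMutex,
	hasPeer func() bool, findPeer func(context.Context) bool, publish func() error) {
	go func() {
		ctx, cancel := context.WithTimeout(parent, 12*time.Second) // roughly one slot
		defer cancel()
		mu.RLock()
		ok := hasPeer()
		mu.RUnlock()
		if !ok {
			mu.Lock()
			ok = findPeer(ctx)
			mu.Unlock()
		}
		if !ok {
			fmt.Println("no subnet peer found, broadcasting anyway")
		}
		if err := publish(); err != nil {
			fmt.Println("broadcast failed:", err)
		}
	}()
}
func main() {
	var mu sync.RWMutex
	broadcastWhenPeerAvailable(context.Background(), &mu,
		func() bool { return false },
		func(ctx context.Context) bool { return true },
		func() error { return nil })
	time.Sleep(100 * time.Millisecond) // give the background goroutine time to finish
}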
// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -299,7 +238,3 @@ func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

View File

@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -448,76 +447,3 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
t.Error("Failed to receive pubsub within 1s")
}
}
func TestService_BroadcastBlob(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")
p := &Service{
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}
blobSidecar := &ethpb.SignedBlobSidecar{
Message: &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
Index: 1,
Slot: 2,
BlockParentRoot: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength),
ProposerIndex: 3,
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
},
Signature: bytesutil.PadTo([]byte{'F'}, fieldparams.BLSSignatureLength),
}
subnet := uint64(0)
topic := BlobSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(blobSidecar)] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)
result := &ethpb.SignedBlobSidecar{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
require.DeepEqual(t, result, blobSidecar)
}(t)
// Attempt to broadcast nil object should fail.
ctx := context.Background()
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastBlob(ctx, subnet, nil))
// Broadcast to peers and wait.
require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

View File

@@ -21,7 +21,6 @@ var gossipTopicMappings = map[string]proto.Message{
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.SignedBlobSidecar{},
}
// GossipTopicMappings is a function to return the assigned data type
@@ -63,8 +62,6 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
// Specially handle Bellatrix objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
// Specially handle Capella objects.
// Specially handle Capella objects
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockCapella{})] = BlockSubnetTopicFormat
// Specially handle Deneb objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
}

View File

@@ -35,7 +35,6 @@ type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.

View File

@@ -60,18 +60,10 @@ var (
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
"until a subnet peer can be found.",
})
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_broadcasts",
Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
})
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
Help: "The number of sync committee that were attempted to be broadcast.",
})
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
})
// Gossip Tracer Metrics
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{

View File

@@ -22,8 +22,6 @@ func TestMain(m *testing.M) {
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,
BlockBatchLimitBurstFactor: 10,
BlobBatchLimit: 8,
BlobBatchLimitBurstFactor: 2,
})
defer func() {
flags.Init(resetFlags)

View File

@@ -57,17 +57,12 @@ func (s *Service) CanSubscribe(topic string) bool {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
default:
return false
}

View File

@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}
// Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

View File

@@ -37,12 +37,6 @@ const PingMessageName = "/ping"
// MetadataMessageName specifies the name for the metadata message topic.
const MetadataMessageName = "/metadata"
// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -58,14 +52,6 @@ const (
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1
// RPCBlobSidecarsByRangeTopicV1 is a topic for requesting blob sidecars
// in the slot range [start_slot, start_slot + count), leading up to the current head block as selected by fork choice.
// Protocol ID: /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
RPCBlobSidecarsByRangeTopicV1 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV1
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
@@ -97,10 +83,6 @@ var RPCTopicMappings = map[string]interface{}{
// RPC Metadata Message
RPCMetaDataTopicV1: new(interface{}),
RPCMetaDataTopicV2: new(interface{}),
// BlobSidecarsByRange v1 Message
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
// BlobSidecarsByRoot v1 Message
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
}
// Maps all registered protocol prefixes.
@@ -117,8 +99,6 @@ var messageMapping = map[string]bool{
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
BlobSidecarsByRangeName: true,
BlobSidecarsByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.
@@ -133,19 +113,6 @@ var versionMapping = map[string]bool{
SchemaVersionV2: true,
}
// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
// was introduced. For other RPC methods, context bytes are always required.
var OmitContextBytesV1 = map[string]bool{
StatusMessageName: true,
GoodbyeMessageName: true,
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
}
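The comment above describes the context-byte convention: after phase0, req/resp payloads are prefixed with a [4]byte identifier (a fork digest) that tells the receiver which schema to decode, while the v1 methods listed in the map omit it. A minimal, hypothetical sketch of writing and reading such a prefix, not Prysm's actual encoder:
package main
import (
	"bytes"
	"fmt"
	"io"
)
// writeWithContext prefixes an encoded payload with a 4-byte context identifier
// so the reader can pick the right schema before decoding the body.
func writeWithContext(w io.Writer, ctx [4]byte, payload []byte) error {
	if _, err := w.Write(ctx[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}
// readContext reads the 4-byte context identifier back off the wire.
func readContext(r io.Reader) ([4]byte, error) {
	var ctx [4]byte
	_, err := io.ReadFull(r, ctx[:])
	return ctx, err
}
func main() {
	var buf bytes.Buffer
	digest := [4]byte{0xde, 0xad, 0xbe, 0xef} // placeholder fork digest
	if err := writeWithContext(&buf, digest, []byte("ssz-encoded body")); err != nil {
		panic(err)
	}
	got, err := readContext(&buf)
	if err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(&buf)
	fmt.Printf("context=%x payload=%q\n", got, rest)
}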
// VerifyTopicMapping verifies that the topic and its accompanying
// message type is correct.
func VerifyTopicMapping(topic string, msg interface{}) error {

View File

@@ -31,13 +31,6 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
// chosen as more than 64(attestation subnet count).
const syncLockerVal = 100
// The value used with the blob sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// blob subnets from others. It is deliberately chosen
// to be greater than the sync and attestation subnet values combined.
const blobSubnetLockerVal = 110
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required amount of

View File

@@ -143,11 +143,6 @@ func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
return nil
}
// BroadcastBlob -- fake.
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
return nil
}
// InterceptPeerDial -- fake.
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true

View File

@@ -33,9 +33,3 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
m.BroadcastCalled = true
return nil
}
// BroadcastBlob broadcasts a blob for mock.
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
m.BroadcastCalled = true
return nil
}

View File

@@ -176,12 +176,6 @@ func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
return nil
}
// BroadcastBlob broadcasts a blob for mock.
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
p.BroadcastCalled = true
return nil
}
// SetStreamHandler for RPC.
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
p.BHost.SetStreamHandler(protocol.ID(topic), handler)

View File

@@ -28,8 +28,7 @@ const (
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"
// Topic Formats
//
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -50,6 +49,4 @@ const (
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
)

View File

@@ -41,9 +41,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
],
)

View File

@@ -12,6 +12,4 @@ var (
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
)

View File

@@ -4,13 +4,9 @@
package types
import (
"bytes"
"sort"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/config/params"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
const rootLength = 32
@@ -68,7 +64,7 @@ func (r *BeaconBlockByRootsReq) UnmarshalSSZ(buf []byte) error {
bufLen := len(buf)
maxLength := int(params.BeaconNetworkConfig().MaxRequestBlocks * rootLength)
if bufLen > maxLength {
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLength, bufLen)
return errors.Errorf("expected buffer with length of upto %d but received length %d", maxLength, bufLen)
}
if bufLen%rootLength != 0 {
return ssz.ErrIncorrectByteSize
@@ -124,90 +120,3 @@ func (m *ErrorMessage) UnmarshalSSZ(buf []byte) error {
*m = errMsg
return nil
}
// BlobSidecarsByRootReq is used to specify a list of blob targets (root+index) in a BlobSidecarsByRoot RPC request.
type BlobSidecarsByRootReq []*eth.BlobIdentifier
// BlobIdentifier is a fixed size value, so we can compute its fixed size at start time (see init below)
var blobIdSize int
// SizeSSZ returns the size of the serialized representation.
func (b *BlobSidecarsByRootReq) SizeSSZ() int {
return len(*b) * blobIdSize
}
// MarshalSSZTo appends the serialized BlobSidecarsByRootReq value to the provided byte slice.
func (b *BlobSidecarsByRootReq) MarshalSSZTo(dst []byte) ([]byte, error) {
// A List without an enclosing container is marshaled exactly like a vector, no length offset required.
marshalledObj, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalledObj...), nil
}
// MarshalSSZ serializes the BlobSidecarsByRootReq value to a byte slice.
func (b *BlobSidecarsByRootReq) MarshalSSZ() ([]byte, error) {
buf := make([]byte, len(*b)*blobIdSize)
for i, id := range *b {
by, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
copy(buf[i*blobIdSize:(i+1)*blobIdSize], by)
}
return buf, nil
}
// UnmarshalSSZ unmarshals the provided bytes buffer into the
// BlobSidecarsByRootReq value.
func (b *BlobSidecarsByRootReq) UnmarshalSSZ(buf []byte) error {
bufLen := len(buf)
maxLength := int(params.BeaconNetworkConfig().MaxRequestBlobSidecars) * blobIdSize
if bufLen > maxLength {
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLength, bufLen)
}
if bufLen%blobIdSize != 0 {
return errors.Wrapf(ssz.ErrIncorrectByteSize, "size=%d", bufLen)
}
count := bufLen / blobIdSize
*b = make([]*eth.BlobIdentifier, count)
for i := 0; i < count; i++ {
id := &eth.BlobIdentifier{}
err := id.UnmarshalSSZ(buf[i*blobIdSize : (i+1)*blobIdSize])
if err != nil {
return err
}
(*b)[i] = id
}
return nil
}
var _ sort.Interface = BlobSidecarsByRootReq{}
// Less reports whether the element with index i must sort before the element with index j.
// BlobIdentifier will be sorted in lexicographic order by root, with Blob Index as tiebreaker for a given root.
func (s BlobSidecarsByRootReq) Less(i, j int) bool {
rootCmp := bytes.Compare(s[i].BlockRoot, s[j].BlockRoot)
if rootCmp != 0 {
// They aren't equal; return true if i < j, false if i > j.
return rootCmp < 0
}
// They are equal; blob index is the tie breaker.
return s[i].Index < s[j].Index
}
// Swap swaps the elements with indexes i and j.
func (s BlobSidecarsByRootReq) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Len is the number of elements in the collection.
func (s BlobSidecarsByRootReq) Len() int {
return len(s)
}
func init() {
sizer := &eth.BlobIdentifier{}
blobIdSize = sizer.SizeSSZ()
}
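Because BlobSidecarsByRootReq implements sort.Interface as shown above, callers can order blob requests lexicographically by block root with the blob index as tiebreaker. The sketch below reproduces that ordering with a local stand-in type instead of eth.BlobIdentifier so it runs on its own.
package main
import (
	"bytes"
	"fmt"
	"sort"
)
// blobID is a local stand-in for eth.BlobIdentifier, used only to illustrate
// the root-then-index ordering implemented above.
type blobID struct {
	BlockRoot []byte
	Index     uint64
}
type byRootThenIndex []blobID
func (s byRootThenIndex) Len() int      { return len(s) }
func (s byRootThenIndex) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byRootThenIndex) Less(i, j int) bool {
	if c := bytes.Compare(s[i].BlockRoot, s[j].BlockRoot); c != 0 {
		return c < 0 // roots differ: lexicographic order decides
	}
	return s[i].Index < s[j].Index // same root: blob index breaks the tie
}
func main() {
	ids := byRootThenIndex{
		{BlockRoot: []byte{0x02}, Index: 1},
		{BlockRoot: []byte{0x01}, Index: 3},
		{BlockRoot: []byte{0x01}, Index: 0},
	}
	sort.Sort(ids)
	for _, id := range ids {
		fmt.Printf("root=%x index=%d\n", id.BlockRoot, id.Index)
	}
}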

View File

@@ -4,82 +4,12 @@ import (
"encoding/hex"
"testing"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func generateBlobIdentifiers(n int) []*eth.BlobIdentifier {
r := make([]*eth.BlobIdentifier, n)
for i := 0; i < n; i++ {
r[i] = &eth.BlobIdentifier{
BlockRoot: bytesutil.PadTo([]byte{byte(i)}, 32),
Index: 0,
}
}
return r
}
func TestBlobSidecarsByRootReq_MarshalSSZ(t *testing.T) {
cases := []struct {
name string
ids []*eth.BlobIdentifier
marshalErr error
unmarshalErr error
unmarshalMod func([]byte) []byte
}{
{
name: "empty list",
},
{
name: "single item list",
ids: generateBlobIdentifiers(1),
},
{
name: "10 item list",
ids: generateBlobIdentifiers(10),
},
{
name: "wonky unmarshal size",
ids: generateBlobIdentifiers(10),
unmarshalMod: func(in []byte) []byte {
in = append(in, byte(0))
return in
},
unmarshalErr: ssz.ErrIncorrectByteSize,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
r := BlobSidecarsByRootReq(c.ids)
by, err := r.MarshalSSZ()
if c.marshalErr != nil {
require.ErrorIs(t, err, c.marshalErr)
return
}
require.NoError(t, err)
if c.unmarshalMod != nil {
by = c.unmarshalMod(by)
}
got := &BlobSidecarsByRootReq{}
err = got.UnmarshalSSZ(by)
if c.unmarshalErr != nil {
require.ErrorIs(t, err, c.unmarshalErr)
return
}
require.NoError(t, err)
for i, gid := range *got {
require.DeepEqual(t, c.ids[i], gid)
}
})
}
}
func TestBeaconBlockByRootsReq_Limit(t *testing.T) {
fixedRoots := make([][32]byte, 0)
for i := uint64(0); i < params.BeaconNetworkConfig().MaxRequestBlocks+100; i++ {
@@ -95,7 +25,7 @@ func TestBeaconBlockByRootsReq_Limit(t *testing.T) {
buf = append(buf, rt[:]...)
}
req2 := BeaconBlockByRootsReq(nil)
require.ErrorContains(t, "expected buffer with length of up to", req2.UnmarshalSSZ(buf))
require.ErrorContains(t, "expected buffer with length of upto", req2.UnmarshalSSZ(buf))
}
func TestErrorResponse_Limit(t *testing.T) {

View File

@@ -25,7 +25,6 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",

View File

@@ -241,35 +241,33 @@ func wrapSignedContributionAndProofsArray(
}
type phase0PublishBlockRequestJson struct {
Phase0Block *SignedBeaconBlockJson `json:"phase0_block"`
Phase0Block *BeaconBlockJson `json:"phase0_block"`
Signature string `json:"signature" hex:"true"`
}
type altairPublishBlockRequestJson struct {
AltairBlock *SignedBeaconBlockAltairJson `json:"altair_block"`
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
Signature string `json:"signature" hex:"true"`
}
type bellatrixPublishBlockRequestJson struct {
BellatrixBlock *SignedBeaconBlockBellatrixJson `json:"bellatrix_block"`
}
type bellatrixPublishBlindedBlockRequestJson struct {
BellatrixBlock *SignedBlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
Signature string `json:"signature" hex:"true"`
}
type capellaPublishBlockRequestJson struct {
CapellaBlock *SignedBeaconBlockCapellaJson `json:"capella_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
Signature string `json:"signature" hex:"true"`
}
type bellatrixPublishBlindedBlockRequestJson struct {
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
Signature string `json:"signature" hex:"true"`
}
type capellaPublishBlindedBlockRequestJson struct {
CapellaBlock *SignedBlindedBeaconBlockCapellaJson `json:"capella_block"`
}
type denebPublishBlockRequestJson struct {
DenebContents *SignedBeaconBlockContentsDenebJson `json:"deneb_contents"`
}
type denebPublishBlindedBlockRequestJson struct {
DenebContents *SignedBlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
Signature string `json:"signature" hex:"true"`
}
// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
@@ -282,50 +280,31 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
s := struct {
Slot string
Message struct {
Slot string
}
}{}
buf, err := io.ReadAll(req.Body)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
typeParseMap := make(map[string]json.RawMessage)
if err := json.Unmarshal(buf, &typeParseMap); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not parse object")
if err := json.Unmarshal(buf, &s); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read slot from body")
}
if val, ok := typeParseMap["message"]; ok {
if err := json.Unmarshal(val, &s); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'message' ")
}
} else if val, ok := typeParseMap["signed_block"]; ok {
temp := struct {
Message struct {
Slot string
}
}{}
if err := json.Unmarshal(val, &temp); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'signed_block' ")
}
s.Slot = temp.Message.Slot
} else {
return false, &apimiddleware.DefaultErrorJson{Message: "could not parse slot from request", Code: http.StatusInternalServerError}
}
slot, err := strconv.ParseUint(s.Slot, 10, 64)
slot, err := strconv.ParseUint(s.Message.Slot, 10, 64)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
}
currentEpoch := slots.ToEpoch(primitives.Slot(slot))
if currentEpoch < params.BeaconConfig().AltairForkEpoch {
endpoint.PostRequest = &SignedBeaconBlockJson{}
endpoint.PostRequest = &SignedBeaconBlockContainerJson{}
} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
endpoint.PostRequest = &SignedBeaconBlockAltairJson{}
endpoint.PostRequest = &SignedBeaconBlockAltairContainerJson{}
} else if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
endpoint.PostRequest = &SignedBeaconBlockBellatrixJson{}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
endpoint.PostRequest = &SignedBeaconBlockCapellaJson{}
endpoint.PostRequest = &SignedBeaconBlockBellatrixContainerJson{}
} else {
endpoint.PostRequest = &SignedBeaconBlockContentsDenebJson{}
endpoint.PostRequest = &SignedBeaconBlockCapellaContainerJson{}
}
req.Body = io.NopCloser(bytes.NewBuffer(buf))
return true, nil
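setInitialPublishBlockPostRequest above reads just enough of the body to learn the slot, converts it to an epoch, and then swaps in the fork-appropriate request struct before normal JSON decoding runs. The sketch below isolates that slot-to-fork dispatch; the epoch boundaries and slots-per-epoch value are placeholders, since the real values come from params.BeaconConfig().
package main
import "fmt"
// forkForSlot maps a slot to a fork label the way the middleware above maps it
// to a request struct. The slots-per-epoch value and fork epochs are made up
// for illustration only.
func forkForSlot(slot uint64) string {
	const slotsPerEpoch = 32
	epoch := slot / slotsPerEpoch
	switch {
	case epoch < 100: // placeholder AltairForkEpoch
		return "phase0"
	case epoch < 200: // placeholder BellatrixForkEpoch
		return "altair"
	case epoch < 300: // placeholder CapellaForkEpoch
		return "bellatrix"
	default:
		return "capella"
	}
}
func main() {
	for _, s := range []uint64{0, 3200, 6400, 9600} {
		fmt.Println(s, "=>", forkForSlot(s))
	}
}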
@@ -336,42 +315,38 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &phase0PublishBlockRequestJson{
Phase0Block: block,
Phase0Block: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &altairPublishBlockRequestJson{
AltairBlock: block,
AltairBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockBellatrixJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockBellatrixContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &bellatrixPublishBlockRequestJson{
BellatrixBlock: block,
BellatrixBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockCapellaJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockCapellaContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &capellaPublishBlockRequestJson{
CapellaBlock: block,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContentsDenebJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &denebPublishBlockRequestJson{
DenebContents: block,
CapellaBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
@@ -389,50 +364,31 @@ func setInitialPublishBlindedBlockPostRequest(endpoint *apimiddleware.Endpoint,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
s := struct {
Slot string
Message struct {
Slot string
}
}{}
buf, err := io.ReadAll(req.Body)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
typeParseMap := make(map[string]json.RawMessage)
if err = json.Unmarshal(buf, &typeParseMap); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not parse object")
if err := json.Unmarshal(buf, &s); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read slot from body")
}
if val, ok := typeParseMap["message"]; ok {
if err = json.Unmarshal(val, &s); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'message' ")
}
} else if val, ok = typeParseMap["signed_blinded_block"]; ok {
temp := struct {
Message struct {
Slot string
}
}{}
if err = json.Unmarshal(val, &temp); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'signed_block' ")
}
s.Slot = temp.Message.Slot
} else {
return false, &apimiddleware.DefaultErrorJson{Message: "could not parse slot from request", Code: http.StatusInternalServerError}
}
slot, err := strconv.ParseUint(s.Slot, 10, 64)
slot, err := strconv.ParseUint(s.Message.Slot, 10, 64)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
}
currentEpoch := slots.ToEpoch(primitives.Slot(slot))
if currentEpoch < params.BeaconConfig().AltairForkEpoch {
endpoint.PostRequest = &SignedBeaconBlockJson{}
endpoint.PostRequest = &SignedBeaconBlockContainerJson{}
} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
endpoint.PostRequest = &SignedBeaconBlockAltairJson{}
endpoint.PostRequest = &SignedBeaconBlockAltairContainerJson{}
} else if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
endpoint.PostRequest = &SignedBlindedBeaconBlockBellatrixJson{}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
endpoint.PostRequest = &SignedBlindedBeaconBlockCapellaJson{}
endpoint.PostRequest = &SignedBlindedBeaconBlockBellatrixContainerJson{}
} else {
endpoint.PostRequest = &SignedBlindedBeaconBlockContentsDenebJson{}
endpoint.PostRequest = &SignedBlindedBeaconBlockCapellaContainerJson{}
}
req.Body = io.NopCloser(bytes.NewBuffer(buf))
return true, nil
@@ -443,51 +399,38 @@ func setInitialPublishBlindedBlockPostRequest(endpoint *apimiddleware.Endpoint,
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &phase0PublishBlockRequestJson{
Phase0Block: block,
Phase0Block: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &altairPublishBlockRequestJson{
AltairBlock: block,
AltairBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockBellatrixJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockBellatrixContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &bellatrixPublishBlindedBlockRequestJson{
BellatrixBlock: &SignedBlindedBeaconBlockBellatrixJson{
Message: block.Message,
Signature: block.Signature,
},
BellatrixBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockCapellaJson); ok {
if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockCapellaContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &capellaPublishBlindedBlockRequestJson{
CapellaBlock: &SignedBlindedBeaconBlockCapellaJson{
Message: block.Message,
Signature: block.Signature,
},
}
endpoint.PostRequest = actualPostReq
return nil
}
if blockContents, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockContentsDenebJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &denebPublishBlindedBlockRequestJson{
DenebContents: &SignedBlindedBeaconBlockContentsDenebJson{
SignedBlindedBlock: blockContents.SignedBlindedBlock,
SignedBlindedBlobSidecars: blockContents.SignedBlindedBlobSidecars,
},
CapellaBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
@@ -533,59 +476,45 @@ func prepareValidatorAggregates(body []byte, responseContainer interface{}) (api
}
type phase0BlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type altairBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockAltairJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type bellatrixBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockBellatrixJson `json:"data"`
Data *SignedBeaconBlockContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type altairBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockAltairContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type bellatrixBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBeaconBlockBellatrixContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type capellaBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBeaconBlockCapellaJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type denebBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBeaconBlockDenebJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type bellatrixBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockBellatrixJson `json:"data"`
Version string `json:"version"`
Data *SignedBeaconBlockCapellaContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type capellaBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockCapellaJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
type bellatrixBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type denebBlindedBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBlindedBeaconBlockDenebJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
type capellaBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockCapellaContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
@@ -599,7 +528,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0BlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockJson{
Data: &SignedBeaconBlockContainerJson{
Message: respContainer.Data.Phase0Block,
Signature: respContainer.Data.Signature,
},
@@ -609,7 +538,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockAltairJson{
Data: &SignedBeaconBlockAltairContainerJson{
Message: respContainer.Data.AltairBlock,
Signature: respContainer.Data.Signature,
},
@@ -619,7 +548,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockBellatrixJson{
Data: &SignedBeaconBlockBellatrixContainerJson{
Message: respContainer.Data.BellatrixBlock,
Signature: respContainer.Data.Signature,
},
@@ -629,23 +558,13 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockCapellaJson{
Data: &SignedBeaconBlockCapellaContainerJson{
Message: respContainer.Data.CapellaBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockDenebJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -668,7 +587,7 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0BlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockJson{
Data: &SignedBeaconBlockContainerJson{
Message: respContainer.Data.Phase0Block,
Signature: respContainer.Data.Signature,
},
@@ -678,7 +597,7 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockAltairJson{
Data: &SignedBeaconBlockAltairContainerJson{
Message: respContainer.Data.AltairBlock,
Signature: respContainer.Data.Signature,
},
@@ -688,7 +607,7 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockBellatrixJson{
Data: &SignedBlindedBeaconBlockBellatrixContainerJson{
Message: respContainer.Data.BellatrixBlock,
Signature: respContainer.Data.Signature,
},
@@ -698,23 +617,13 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockCapellaJson{
Data: &SignedBlindedBeaconBlockCapellaContainerJson{
Message: respContainer.Data.CapellaBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockDenebJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -746,11 +655,6 @@ type capellaStateResponseJson struct {
Data *BeaconStateCapellaJson `json:"data"`
}
type denebStateResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconStateDenebJson `json:"data"`
}
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BeaconStateV2ResponseJson)
if !ok {
@@ -779,11 +683,6 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
Version: respContainer.Version,
Data: respContainer.Data.CapellaState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebState,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
}
@@ -815,11 +714,6 @@ type capellaProduceBlockResponseJson struct {
Data *BeaconBlockCapellaJson `json:"data"`
}
type denebProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockContentsDenebJson `json:"data"`
}
type bellatrixProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockBellatrixJson `json:"data"`
@@ -830,11 +724,6 @@ type capellaProduceBlindedBlockResponseJson struct {
Data *BlindedBeaconBlockCapellaJson `json:"data"`
}
type denebProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockContentsDenebJson `json:"data"`
}
func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*ProduceBlockResponseV2Json)
if !ok {
@@ -863,11 +752,6 @@ func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, [
Version: respContainer.Version,
Data: respContainer.Data.CapellaBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebContents,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -907,11 +791,6 @@ func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefau
Version: respContainer.Version,
Data: respContainer.Data.CapellaBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebProduceBlindedBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebContents,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
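
The serialize helpers above all follow one shape: read the version string off the response container, wrap the inner data in the matching versioned response struct, and return an internal-server error for anything unrecognized. The sketch below reproduces that dispatch pattern in isolation, using only the standard library rather than the real apimiddleware and ethpbv2 packages; the names versionedResponse and serializeByVersion are illustrative stand-ins, not types from this repository.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// versionedResponse mirrors the shape of the *BlockResponseJson wrappers:
// a version tag plus a version-specific payload.
type versionedResponse struct {
	Version string      `json:"version"`
	Data    interface{} `json:"data"`
}

// serializeByVersion picks a wrapper based on the version string, mimicking
// the strings.EqualFold dispatch used by serializeV2Block and friends.
func serializeByVersion(version string, phase0, altair interface{}) ([]byte, error) {
	var resp versionedResponse
	switch {
	case strings.EqualFold(version, "phase0"):
		resp = versionedResponse{Version: version, Data: phase0}
	case strings.EqualFold(version, "altair"):
		resp = versionedResponse{Version: version, Data: altair}
	default:
		return nil, fmt.Errorf("unsupported block version '%s'", version)
	}
	return json.Marshal(resp)
}

func main() {
	out, err := serializeByVersion("ALTAIR", nil, map[string]string{"slot": "1"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"version":"ALTAIR","data":{"slot":"1"}}
}
```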

View File

@@ -406,15 +406,13 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
cfg.CapellaForkEpoch = params.BeaconConfig().BellatrixForkEpoch + 1
cfg.DenebForkEpoch = params.BeaconConfig().CapellaForkEpoch + 1
params.OverrideBeaconConfig(cfg)
endpoint := &apimiddleware.Endpoint{}
s := struct {
Message struct {
Slot string
} `json:"message"`
}
}{}
t.Run("Phase 0", func(t *testing.T) {
s.Message = struct{ Slot string }{Slot: "0"}
@@ -427,7 +425,7 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Altair", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
@@ -442,7 +440,7 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockAltairJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockAltairContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Bellatrix", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
@@ -457,7 +455,7 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockBellatrixJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockBellatrixContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Capella", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
@@ -477,48 +475,14 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockCapellaJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Deneb", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.DenebForkEpoch = cfg.CapellaForkEpoch.Add(2)
params.OverrideBeaconConfig(cfg)
slot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
denebS := struct {
SignedBlock struct {
Message struct {
Slot string
} `json:"message"`
} `json:"signed_block"`
}{}
denebS.SignedBlock = struct {
Message struct {
Slot string
} `json:"message"`
}{
Message: struct {
Slot string
}{Slot: strconv.FormatUint(uint64(slot), 10)},
}
j, err := json.Marshal(denebS)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(j)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockContentsDenebJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockCapellaContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
}
func TestPreparePublishedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBeaconBlockJson{
PostRequest: &SignedBeaconBlockContainerJson{
Message: &BeaconBlockJson{
Body: &BeaconBlockBodyJson{},
},
@@ -532,7 +496,7 @@ func TestPreparePublishedBlock(t *testing.T) {
t.Run("Altair", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBeaconBlockAltairJson{
PostRequest: &SignedBeaconBlockAltairContainerJson{
Message: &BeaconBlockAltairJson{
Body: &BeaconBlockBodyAltairJson{},
},
@@ -546,7 +510,7 @@ func TestPreparePublishedBlock(t *testing.T) {
t.Run("Bellatrix", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBeaconBlockBellatrixJson{
PostRequest: &SignedBeaconBlockBellatrixContainerJson{
Message: &BeaconBlockBellatrixJson{
Body: &BeaconBlockBodyBellatrixJson{},
},
@@ -568,15 +532,13 @@ func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
cfg.CapellaForkEpoch = params.BeaconConfig().BellatrixForkEpoch + 1
cfg.DenebForkEpoch = params.BeaconConfig().CapellaForkEpoch + 1
params.OverrideBeaconConfig(cfg)
endpoint := &apimiddleware.Endpoint{}
s := struct {
Message struct {
Slot string
} `json:"message"`
}
}{}
t.Run("Phase 0", func(t *testing.T) {
s.Message = struct{ Slot string }{Slot: "0"}
@@ -589,7 +551,7 @@ func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Altair", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
@@ -604,7 +566,7 @@ func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockAltairJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBeaconBlockAltairContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Bellatrix", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
@@ -619,59 +581,14 @@ func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockBellatrixJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Capella", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
j, err := json.Marshal(s)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(j)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockCapellaJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Deneb", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
denebS := struct {
SignedBlindedBlock struct {
Message struct {
Slot string
} `json:"message"`
} `json:"signed_blinded_block"`
}{}
denebS.SignedBlindedBlock = struct {
Message struct {
Slot string
} `json:"message"`
}{
Message: struct {
Slot string
}{Slot: strconv.FormatUint(uint64(slot), 10)},
}
j, err := json.Marshal(denebS)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(j)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockContentsDenebJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockBellatrixContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
}
func TestPreparePublishedBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBeaconBlockJson{
PostRequest: &SignedBeaconBlockContainerJson{
Message: &BeaconBlockJson{
Body: &BeaconBlockBodyJson{},
},
@@ -685,7 +602,7 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
t.Run("Altair", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBeaconBlockAltairJson{
PostRequest: &SignedBeaconBlockAltairContainerJson{
Message: &BeaconBlockAltairJson{
Body: &BeaconBlockBodyAltairJson{},
},
@@ -699,7 +616,7 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
t.Run("Bellatrix", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBlindedBeaconBlockBellatrixJson{
PostRequest: &SignedBlindedBeaconBlockBellatrixContainerJson{
Message: &BlindedBeaconBlockBellatrixJson{
Body: &BlindedBeaconBlockBodyBellatrixJson{},
},
@@ -710,34 +627,7 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
_, ok := endpoint.PostRequest.(*bellatrixPublishBlindedBlockRequestJson)
assert.Equal(t, true, ok)
})
t.Run("Capella", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBlindedBeaconBlockCapellaJson{
Message: &BlindedBeaconBlockCapellaJson{
Body: &BlindedBeaconBlockBodyCapellaJson{},
},
},
}
errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
require.Equal(t, true, errJson == nil)
_, ok := endpoint.PostRequest.(*capellaPublishBlindedBlockRequestJson)
assert.Equal(t, true, ok)
})
t.Run("Deneb", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SignedBlindedBeaconBlockContentsDenebJson{
SignedBlindedBlock: &SignedBlindedBeaconBlockDenebJson{
Message: &BlindedBeaconBlockDenebJson{},
},
SignedBlindedBlobSidecars: []*SignedBlindedBlobSidecarJson{},
},
}
errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
require.Equal(t, true, errJson == nil)
_, ok := endpoint.PostRequest.(*denebPublishBlindedBlockRequestJson)
assert.Equal(t, true, ok)
})
t.Run("unsupported block type", func(t *testing.T) {
errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
@@ -1269,56 +1159,6 @@ func TestSerializeProducedV2Block(t *testing.T) {
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Deneb", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: ethpbv2.Version_DENEB.String(),
Data: &BeaconBlockContainerV2Json{
DenebContents: &BeaconBlockContentsDenebJson{
Block: &BeaconBlockDenebJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyDenebJson{},
},
BlobSidecars: []*BlobSidecarJson{{
BlockRoot: "root",
Index: "1",
Slot: "1",
BlockParentRoot: "root",
ProposerIndex: "1",
Blob: "blob",
KzgCommitment: "kzgcommitment",
KzgProof: "kzgproof",
}},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &denebProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data.Block
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
assert.NotNil(t, beaconBlock.Body)
require.Equal(t, 1, len(resp.Data.BlobSidecars))
sidecar := resp.Data.BlobSidecars[0]
assert.Equal(t, "root", sidecar.BlockRoot)
assert.Equal(t, "1", sidecar.Index)
assert.Equal(t, "1", sidecar.Slot)
assert.Equal(t, "root", sidecar.BlockParentRoot)
assert.Equal(t, "1", sidecar.ProposerIndex)
assert.Equal(t, "blob", sidecar.Blob)
assert.Equal(t, "kzgcommitment", sidecar.KzgCommitment)
assert.Equal(t, "kzgproof", sidecar.KzgProof)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeProducedV2Block(response)

View File

@@ -114,7 +114,7 @@ type BlockHeaderResponseJson struct {
}
type BlockResponseJson struct {
Data *SignedBeaconBlockJson `json:"data"`
Data *SignedBeaconBlockContainerJson `json:"data"`
}
type BlockV2ResponseJson struct {
@@ -366,7 +366,7 @@ type BlockRootContainerJson struct {
Root string `json:"root" hex:"true"`
}
type SignedBeaconBlockJson struct {
type SignedBeaconBlockContainerJson struct {
Message *BeaconBlockJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
@@ -395,7 +395,6 @@ type SignedBeaconBlockContainerV2Json struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
Signature string `json:"signature" hex:"true"`
}
@@ -404,111 +403,48 @@ type SignedBlindedBeaconBlockContainerJson struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockContentsContainerJson struct {
Phase0Block *SignedBeaconBlockJson `json:"phase0_block"`
AltairBlock *SignedBeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *SignedBlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *SignedBlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebContents *SignedBlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
}
type BeaconBlockContainerV2Json struct {
Phase0Block *BeaconBlockJson `json:"phase0_block"`
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
DenebContents *BeaconBlockContentsDenebJson `json:"deneb_contents"`
Phase0Block *BeaconBlockJson `json:"phase0_block"`
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
}
type BlindedBeaconBlockContainerJson struct {
Phase0Block *BeaconBlockJson `json:"phase0_block"`
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebContents *BlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
Phase0Block *BeaconBlockJson `json:"phase0_block"`
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
}
type SignedBeaconBlockAltairJson struct {
type SignedBeaconBlockAltairContainerJson struct {
Message *BeaconBlockAltairJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBeaconBlockBellatrixJson struct {
type SignedBeaconBlockBellatrixContainerJson struct {
Message *BeaconBlockBellatrixJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBeaconBlockCapellaJson struct {
type SignedBeaconBlockCapellaContainerJson struct {
Message *BeaconBlockCapellaJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBeaconBlockContentsDenebJson struct {
SignedBlock *SignedBeaconBlockDenebJson `json:"signed_block"`
SignedBlobSidecars []*SignedBlobSidecarJson `json:"signed_blob_sidecars"`
}
type SignedBlobSidecarJson struct {
Message *BlobSidecarJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type BlobSidecarJson struct {
BlockRoot string `json:"block_root" hex:"true"`
Index string `json:"index"`
Slot string `json:"slot"`
BlockParentRoot string `json:"block_parent_root" hex:"true"`
ProposerIndex string `json:"proposer_index"`
Blob string `json:"blob" hex:"true"` // pattern: "^0x[a-fA-F0-9]{262144}$"
KzgCommitment string `json:"kzg_commitment" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
KzgProof string `json:"kzg_proof,omitempty" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
}
type BlindedBlobSidecarJson struct {
BlockRoot string `json:"block_root" hex:"true"`
Index string `json:"index"`
Slot string `json:"slot"`
BlockParentRoot string `json:"block_parent_root" hex:"true"`
ProposerIndex string `json:"proposer_index"`
BlobRoot string `json:"blob_root" hex:"true"`
KzgCommitment string `json:"kzg_commitment" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
KzgProof string `json:"kzg_proof,omitempty" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
}
type SignedBeaconBlockDenebJson struct {
Message *BeaconBlockDenebJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockBellatrixJson struct {
type SignedBlindedBeaconBlockBellatrixContainerJson struct {
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockCapellaJson struct {
type SignedBlindedBeaconBlockCapellaContainerJson struct {
Message *BlindedBeaconBlockCapellaJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockContentsDenebJson struct {
SignedBlindedBlock *SignedBlindedBeaconBlockDenebJson `json:"signed_blinded_block"`
SignedBlindedBlobSidecars []*SignedBlindedBlobSidecarJson `json:"signed_blinded_blob_sidecars"`
}
type SignedBlindedBeaconBlockDenebJson struct {
Message *BlindedBeaconBlockDenebJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBlobSidecarJson struct {
Message *BlindedBlobSidecarJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type BeaconBlockAltairJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -533,19 +469,6 @@ type BeaconBlockCapellaJson struct {
Body *BeaconBlockBodyCapellaJson `json:"body"`
}
type BeaconBlockContentsDenebJson struct {
Block *BeaconBlockDenebJson `json:"block"`
BlobSidecars []*BlobSidecarJson `json:"blob_sidecars"`
}
type BeaconBlockDenebJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BeaconBlockBodyDenebJson `json:"body"`
}
type BlindedBeaconBlockBellatrixJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -562,19 +485,6 @@ type BlindedBeaconBlockCapellaJson struct {
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
}
type BlindedBeaconBlockDenebJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BlindedBeaconBlockBodyDenebJson `json:"body"`
}
type BlindedBeaconBlockContentsDenebJson struct {
BlindedBlock *BlindedBeaconBlockDenebJson `json:"blinded_block"`
BlindedBlobSidecars []*BlindedBlobSidecarJson `json:"blinded_blob_sidecars"`
}
type BeaconBlockBodyAltairJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -614,21 +524,6 @@ type BeaconBlockBodyCapellaJson struct {
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}
type BeaconBlockBodyDenebJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadDenebJson `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
}
type BlindedBeaconBlockBodyBellatrixJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -656,21 +551,6 @@ type BlindedBeaconBlockBodyCapellaJson struct {
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}
type BlindedBeaconBlockBodyDenebJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
}
type ExecutionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -706,26 +586,6 @@ type ExecutionPayloadCapellaJson struct {
Withdrawals []*WithdrawalJson `json:"withdrawals"`
}
type ExecutionPayloadDenebJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
DataGasUsed string `json:"data_gas_used"` // new in deneb
ExcessDataGas string `json:"excess_data_gas"` // new in deneb
BlockHash string `json:"block_hash" hex:"true"`
Transactions []string `json:"transactions" hex:"true"`
Withdrawals []*WithdrawalJson `json:"withdrawals"`
}
type ExecutionPayloadHeaderJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -761,26 +621,6 @@ type ExecutionPayloadHeaderCapellaJson struct {
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}
type ExecutionPayloadHeaderDenebJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
DataGasUsed string `json:"data_gas_used"` // new in deneb
ExcessDataGas string `json:"excess_data_gas"` // new in deneb
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}
type SyncAggregateJson struct {
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
@@ -1037,43 +877,11 @@ type BeaconStateCapellaJson struct {
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
}
type BeaconStateDenebJson struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
Slot string `json:"slot"`
Fork *ForkJson `json:"fork"`
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
BlockRoots []string `json:"block_roots" hex:"true"`
StateRoots []string `json:"state_roots" hex:"true"`
HistoricalRoots []string `json:"historical_roots" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*ValidatorJson `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes" hex:"true"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits" hex:"true"`
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"latest_execution_payload_header"` // new in deneb
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
}
type BeaconStateContainerV2Json struct {
Phase0State *BeaconStateJson `json:"phase0_state"`
AltairState *BeaconStateAltairJson `json:"altair_state"`
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
DenebState *BeaconStateDenebJson `json:"deneb_state"`
}
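
These JSON structs carry custom struct tags such as hex:"true", enum:"true", and uint256:"true" alongside the json names. The middleware that interprets those tags is not part of this diff, so the snippet below is only a guess at how a consumer can read them: it walks a struct with the reflect package and reports which fields are flagged as hex-encoded. The sampleJson type and hexFields helper are made up for illustration.

```go
package main

import (
	"fmt"
	"reflect"
)

// sampleJson is a stand-in for types like SignedBeaconBlockContainerJson,
// using the same tag convention seen above.
type sampleJson struct {
	Message   string `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

// hexFields returns the json names of all fields tagged hex:"true".
func hexFields(v interface{}) []string {
	t := reflect.TypeOf(v)
	var out []string
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Tag.Get("hex") == "true" {
			out = append(out, f.Tag.Get("json"))
		}
	}
	return out
}

func main() {
	fmt.Println(hexFields(sampleJson{})) // [signature]
}
```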
type ForkJson struct {

View File

@@ -75,15 +75,6 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockDeneb(ctx, blk)
if result != nil {
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
@@ -139,15 +130,6 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlindedSSZBlockDeneb(ctx, blk)
if result != nil {
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
@@ -160,7 +142,7 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally; however, blocks which fail validation are still
// broadcast, but a different status code is returned (202).
func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBlindedBeaconBlockContentsContainer) (*emptypb.Empty, error) {
func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBlindedBeaconBlockContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
defer span.End()
@@ -170,24 +152,20 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
}
switch blkContainer := req.Message.(type) {
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_DenebContents:
if err := bs.submitBlindedDenebContents(ctx, blkContainer.DenebContents); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock:
if err := bs.submitBlindedCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_CapellaBlock:
if err := bs.submitBlindedCapellaBlock(ctx, blkContainer.CapellaBlock.Message, blkContainer.CapellaBlock.Signature); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock:
if err := bs.submitBlindedBellatrixBlock(ctx, blkContainer.BellatrixBlock, req.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_BellatrixBlock:
if err := bs.submitBlindedBellatrixBlock(ctx, blkContainer.BellatrixBlock.Message, blkContainer.BellatrixBlock.Signature); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block.Block, blkContainer.Phase0Block.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBlindedBeaconBlockContentsContainer_AltairBlock:
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock.Message, blkContainer.AltairBlock.Signature); err != nil {
case *ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock:
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock, req.Signature); err != nil {
return nil, err
}
default:
@@ -503,76 +481,6 @@ func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.Rea
}, nil
}
func (bs *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "Could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_DENEB,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, err
}
return nil, err
}
if denebBlk == nil {
return nil, errNilBlock
}
blindedBlkInterface, err := blk.ToBlinded()
if err != nil {
return nil, errors.Wrapf(err, "could not convert block to blinded block")
}
blindedDenebBlock, err := blindedBlkInterface.PbBlindedDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_CAPELLA,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
@@ -723,81 +631,6 @@ func (bs *Server) getBlindedSSZBlockCapella(ctx context.Context, blk interfaces.
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}
func (bs *Server) getBlindedSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBlindedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_DENEB,
ExecutionOptimistic: isOptimistic,
Data: sszData,
}, nil
}
return nil, err
}
}
if denebBlk == nil {
return nil, errNilBlock
}
blindedBlkInterface, err := blk.ToBlinded()
if err != nil {
return nil, errors.Wrapf(err, "could not convert block to blinded block")
}
blindedDenebBlock, err := blindedBlkInterface.PbBlindedDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBlindedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_DENEB, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}
func (bs *Server) submitBlindedBellatrixBlock(ctx context.Context, blindedBellatrixBlk *ethpbv2.BlindedBeaconBlockBellatrix, sig []byte) error {
b, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(&ethpbv2.SignedBlindedBeaconBlockBellatrix{
Message: blindedBellatrixBlk,
@@ -841,29 +674,3 @@ func (bs *Server) submitBlindedCapellaBlock(ctx context.Context, blindedCapellaB
}
return nil
}
func (bs *Server) submitBlindedDenebContents(ctx context.Context, blindedDenebContents *ethpbv2.SignedBlindedBeaconBlockContentsDeneb) error {
blk, err := migration.BlindedDenebToV1Alpha1SignedBlock(&ethpbv2.SignedBlindedBeaconBlockDeneb{
Message: blindedDenebContents.SignedBlindedBlock.Message,
Signature: blindedDenebContents.SignedBlindedBlock.Signature,
})
if err != nil {
return status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
blobs := migration.SignedBlindedBlobsToV1Alpha1SignedBlindedBlobs(blindedDenebContents.SignedBlindedBlobSidecars)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedDeneb{
BlindedDeneb: &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
Block: blk,
Blobs: blobs,
},
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose blinded block: %v", err)
}
return nil
}
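
After this change SubmitBlindedBlock takes a single SignedBlindedBeaconBlockContainer whose Message field is a oneof over the per-fork blinded block types, with one top-level Signature, and it dispatches on the concrete message type; the tests in the next file construct requests in exactly that shape. The standalone sketch below imitates that oneof-plus-type-switch dispatch with plain Go interfaces. The container and block types here are simplified stand-ins, not the generated ethpbv2 protobufs.

```go
package main

import "fmt"

// blockMessage plays the role of the generated oneof interface behind
// SignedBlindedBeaconBlockContainer.Message.
type blockMessage interface{ isBlockMessage() }

type phase0Block struct{ Slot uint64 }
type bellatrixBlindedBlock struct{ Slot uint64 }

func (*phase0Block) isBlockMessage()           {}
func (*bellatrixBlindedBlock) isBlockMessage() {}

// signedBlockContainer mirrors the new request shape: one signature and
// one versioned message.
type signedBlockContainer struct {
	Message   blockMessage
	Signature []byte
}

// submitBlock dispatches on the concrete message type, as SubmitBlindedBlock
// does in its switch over blkContainer.
func submitBlock(req *signedBlockContainer) error {
	switch blk := req.Message.(type) {
	case *phase0Block:
		fmt.Printf("phase0 block at slot %d, sig %d bytes\n", blk.Slot, len(req.Signature))
	case *bellatrixBlindedBlock:
		fmt.Printf("blinded bellatrix block at slot %d, sig %d bytes\n", blk.Slot, len(req.Signature))
	default:
		return fmt.Errorf("unsupported block container type %T", blk)
	}
	return nil
}

func main() {
	_ = submitBlock(&signedBlockContainer{
		Message:   &bellatrixBlindedBlock{Slot: 42},
		Signature: []byte("sig"),
	})
}
```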

View File

@@ -103,27 +103,6 @@ func TestServer_GetBlindedBlock(t *testing.T) {
assert.DeepEqual(t, expected, capellaBlock.CapellaBlock)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
t.Run("Deneb", func(t *testing.T) {
b := util.NewBlindedBeaconBlockDeneb()
blk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
mockChainService := &mock.ChainService{}
bs := &Server{
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
OptimisticModeFetcher: mockChainService,
}
expected, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(b.Block)
require.NoError(t, err)
resp, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{})
require.NoError(t, err)
capellaBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_DenebBlock)
require.Equal(t, true, ok)
assert.DeepEqual(t, expected, capellaBlock.DenebBlock)
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
})
t.Run("execution optimistic", func(t *testing.T) {
b := util.NewBlindedBeaconBlockBellatrix()
blk, err := blocks.NewSignedBeaconBlock(b)
@@ -263,26 +242,6 @@ func TestServer_GetBlindedBlockSSZ(t *testing.T) {
assert.DeepEqual(t, expected, resp.Data)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
t.Run("Deneb", func(t *testing.T) {
b := util.NewBlindedBeaconBlockDeneb()
blk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
mockChainService := &mock.ChainService{}
bs := &Server{
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
OptimisticModeFetcher: mockChainService,
}
expected, err := blk.MarshalSSZ()
require.NoError(t, err)
resp, err := bs.GetBlindedBlockSSZ(ctx, &ethpbv1.BlockRequest{})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, expected, resp.Data)
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
})
t.Run("execution optimistic", func(t *testing.T) {
b := util.NewBlindedBeaconBlockBellatrix()
blk, err := blocks.NewSignedBeaconBlock(b)
@@ -490,8 +449,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBlindedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContentsContainer_Phase0Block{Phase0Block: &ethpbv1.SignedBeaconBlock{}},
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: &ethpbv1.BeaconBlock{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
@@ -504,8 +464,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBlindedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContentsContainer_AltairBlock{AltairBlock: &ethpbv2.SignedBeaconBlockAltair{}},
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: &ethpbv2.BeaconBlockAltair{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
@@ -518,8 +479,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBlindedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContentsContainer_BellatrixBlock{BellatrixBlock: &ethpbv2.SignedBlindedBeaconBlockBellatrix{}},
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: &ethpbv2.BlindedBeaconBlockBellatrix{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
@@ -532,27 +494,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBlindedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContentsContainer_CapellaBlock{CapellaBlock: &ethpbv2.SignedBlindedBeaconBlockCapella{}},
}
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
})
t.Run("Deneb", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBlindedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContentsContainer_DenebContents{
DenebContents: &ethpbv2.SignedBlindedBeaconBlockContentsDeneb{
SignedBlindedBlock: &ethpbv2.SignedBlindedBeaconBlockDeneb{},
SignedBlindedBlobSidecars: []*ethpbv2.SignedBlindedBlobSidecar{},
},
},
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: &ethpbv2.BlindedBeaconBlockCapella{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)

View File

@@ -202,7 +202,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
// new block into its state, and therefore validate the block internally; however, blocks which fail validation are
// still broadcast, but a different status code is returned (202).
func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBlockContentsContainer) (*emptypb.Empty, error) {
func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBlockContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
defer span.End()
@@ -212,24 +212,20 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
}
switch blkContainer := req.Message.(type) {
case *ethpbv2.SignedBeaconBlockContentsContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block.Block, blkContainer.Phase0Block.Signature); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBeaconBlockContentsContainer_AltairBlock:
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock.Message, blkContainer.AltairBlock.Signature); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_AltairBlock:
if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock, req.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBeaconBlockContentsContainer_BellatrixBlock:
if err := bs.submitBellatrixBlock(ctx, blkContainer.BellatrixBlock.Message, blkContainer.BellatrixBlock.Signature); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_BellatrixBlock:
if err := bs.submitBellatrixBlock(ctx, blkContainer.BellatrixBlock, req.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBeaconBlockContentsContainer_CapellaBlock:
if err := bs.submitCapellaBlock(ctx, blkContainer.CapellaBlock.Message, blkContainer.CapellaBlock.Signature); err != nil {
return nil, err
}
case *ethpbv2.SignedBeaconBlockContentsContainer_DenebContents:
if err := bs.submitDenebContents(ctx, blkContainer.DenebContents); err != nil {
case *ethpbv2.SignedBeaconBlockContainer_CapellaBlock:
if err := bs.submitCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
return nil, err
}
default:
@@ -451,17 +447,6 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockDeneb(ctx, blk)
if result != nil {
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
@@ -521,16 +506,6 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockDeneb(ctx, blk)
if result != nil {
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
@@ -851,76 +826,6 @@ func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySi
}, nil
}
func (bs *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
}
denebBlk, err = signedFullBlock.PbDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_DENEB,
Data: &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, err
}
return nil, err
}
if denebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_DENEB,
Data: &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
func getSSZBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
phase0Blk, err := blk.PbPhase0Block()
if err != nil {
@@ -1116,82 +1021,6 @@ func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnl
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}
func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
}
denebBlk, err = signedFullBlock.PbDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_DENEB,
ExecutionOptimistic: isOptimistic,
Data: sszData,
}, nil
}
return nil, err
}
return nil, err
}
if denebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_DENEB, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(&ethpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
if err != nil {
@@ -1267,29 +1096,3 @@ func (bs *Server) submitCapellaBlock(ctx context.Context, capellaBlk *ethpbv2.Be
}
return nil
}
func (bs *Server) submitDenebContents(ctx context.Context, denebContents *ethpbv2.SignedBeaconBlockContentsDeneb) error {
blk, err := migration.DenebToV1Alpha1SignedBlock(&ethpbv2.SignedBeaconBlockDeneb{
Message: denebContents.SignedBlock.Message,
Signature: denebContents.SignedBlock.Signature,
})
if err != nil {
return status.Errorf(codes.Internal, "Could not get block: %v", err)
}
blobs := migration.SignedBlobsToV1Alpha1SignedBlobs(denebContents.SignedBlobSidecars)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Deneb{
Deneb: &eth.SignedBeaconBlockAndBlobsDeneb{
Block: blk,
Blobs: blobs,
},
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return nil
}
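
The block handlers in the hunks above all follow the same dispatch shape: try the fork-specific getter, treat the ErrUnsupportedField sentinel as "wrong fork, keep going" (reconstructing the payload first when only the blinded variant is available), and treat any other error as fatal. A minimal standalone sketch of that pattern, using hypothetical block types and a hypothetical sentinel rather than Prysm's interfaces, might look like this:

package main

import (
	"errors"
	"fmt"
)

// errUnsupportedField stands in for consensus_types.ErrUnsupportedField:
// a getter returns it when the wrapped block belongs to a different fork.
var errUnsupportedField = errors.New("unsupported field for block type")

// block is a hypothetical multi-fork container; only one payload is non-nil.
type block struct {
	capella *string
	deneb   *string
}

func (b *block) pbCapellaBlock() (*string, error) {
	if b.capella == nil {
		return nil, errUnsupportedField
	}
	return b.capella, nil
}

func (b *block) pbDenebBlock() (*string, error) {
	if b.deneb == nil {
		return nil, errUnsupportedField
	}
	return b.deneb, nil
}

// describe tries each fork-specific getter in turn, mirroring the shape of
// getBlockDeneb / getSSZBlockDeneb above: the sentinel falls through, any
// other error is returned, and an unmatched block is reported as unknown.
func describe(b *block) (string, error) {
	if blk, err := b.pbDenebBlock(); err == nil {
		return "deneb: " + *blk, nil
	} else if !errors.Is(err, errUnsupportedField) {
		return "", err
	}
	if blk, err := b.pbCapellaBlock(); err == nil {
		return "capella: " + *blk, nil
	} else if !errors.Is(err, errUnsupportedField) {
		return "", err
	}
	return "", fmt.Errorf("unknown block type %T", b)
}

func main() {
	payload := "0xabc"
	out, err := describe(&block{capella: &payload})
	fmt.Println(out, err) // capella: 0xabc <nil>
}
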

View File

@@ -360,8 +360,9 @@ func TestServer_SubmitBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBeaconBlockContentsContainer_Phase0Block{Phase0Block: &ethpbv1.SignedBeaconBlock{}},
blockReq := &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_Phase0Block{Phase0Block: &ethpbv1.BeaconBlock{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlock(context.Background(), blockReq)
assert.NoError(t, err)
@@ -374,8 +375,9 @@ func TestServer_SubmitBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBeaconBlockContentsContainer_AltairBlock{AltairBlock: &ethpbv2.SignedBeaconBlockAltair{}},
blockReq := &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_AltairBlock{AltairBlock: &ethpbv2.BeaconBlockAltair{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlock(context.Background(), blockReq)
assert.NoError(t, err)
@@ -388,8 +390,9 @@ func TestServer_SubmitBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBeaconBlockContentsContainer_BellatrixBlock{BellatrixBlock: &ethpbv2.SignedBeaconBlockBellatrix{}},
blockReq := &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: &ethpbv2.BeaconBlockBellatrix{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlock(context.Background(), blockReq)
assert.NoError(t, err)
@@ -402,8 +405,9 @@ func TestServer_SubmitBlock(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
blockReq := &ethpbv2.SignedBeaconBlockContentsContainer{
Message: &ethpbv2.SignedBeaconBlockContentsContainer_CapellaBlock{CapellaBlock: &ethpbv2.SignedBeaconBlockCapella{}},
blockReq := &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_CapellaBlock{CapellaBlock: &ethpbv2.BeaconBlockCapella{}},
Signature: []byte("sig"),
}
_, err := server.SubmitBlock(context.Background(), blockReq)
assert.NoError(t, err)

View File

@@ -51,8 +51,6 @@ func TestGetSpec(t *testing.T) {
config.BellatrixForkEpoch = 101
config.CapellaForkVersion = []byte("CapellaForkVersion")
config.CapellaForkEpoch = 103
config.DenebForkVersion = []byte("DenebForkVersion")
config.DenebForkEpoch = 105
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -131,9 +129,6 @@ func TestGetSpec(t *testing.T) {
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
var dbs [4]byte
copy(dam[:], []byte{'2', '0', '0', '0'})
config.DomainBlobSidecar = dbs
params.OverrideBeaconConfig(config)
@@ -141,7 +136,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 111, len(resp.Data))
assert.Equal(t, 108, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -210,10 +205,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
case "CAPELLA_FORK_EPOCH":
assert.Equal(t, "103", v)
case "DENEB_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
case "DENEB_FORK_EPOCH":
assert.Equal(t, "105", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
@@ -278,8 +269,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "51", v)
case "MAX_VOLUNTARY_EXITS":
assert.Equal(t, "52", v)
case "MAX_BLOBS_PER_BLOCK":
assert.Equal(t, "4", v)
case "TIMELY_HEAD_FLAG_INDEX":
assert.Equal(t, "0x35", v)
case "TIMELY_SOURCE_FLAG_INDEX":
@@ -346,8 +335,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "DOMAIN_BLOB_SIDECAR":
assert.Equal(t, "0x00000000", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":

View File

@@ -1,48 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//proto/engine/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//network:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
],
)

View File

@@ -1,131 +0,0 @@
package builder
import (
"fmt"
"net/http"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// ExpectedWithdrawals gets the withdrawals computed from the specified state that will be included in the block built on that state.
func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Retrieve beacon state
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
network.WriteError(w, &network.DefaultErrorJson{
Message: "state_id is required in URL params",
Code: http.StatusBadRequest,
})
return
}
st, err := s.Stater.State(r.Context(), []byte(stateId))
if err != nil {
network.WriteError(w, handleWrapError(err, "could not retrieve state", http.StatusNotFound))
return
}
queryParam := r.URL.Query().Get("proposal_slot")
var proposalSlot primitives.Slot
if queryParam != "" {
pSlot, err := strconv.ParseUint(queryParam, 10, 64)
if err != nil {
network.WriteError(w, handleWrapError(err, "invalid proposal slot value", http.StatusBadRequest))
return
}
proposalSlot = primitives.Slot(pSlot)
} else {
proposalSlot = st.Slot() + 1
}
// Perform sanity checks on proposal slot before computing state
capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not calculate Capella start slot", http.StatusInternalServerError))
return
}
if proposalSlot < capellaStart {
network.WriteError(w, &network.DefaultErrorJson{
Message: "expected withdrawals are not supported before Capella fork",
Code: http.StatusBadRequest,
})
return
}
if proposalSlot <= st.Slot() {
network.WriteError(w, &network.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot must be bigger than state slot. proposal slot: %d, state slot: %d", proposalSlot, st.Slot()),
Code: http.StatusBadRequest,
})
return
}
lookAheadLimit := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().MaxSeedLookahead)))
if st.Slot().Add(lookAheadLimit) <= proposalSlot {
network.WriteError(w, &network.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot cannot be >= %d slots ahead of state slot", lookAheadLimit),
Code: http.StatusBadRequest,
})
return
}
// Get metadata for response
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
return
}
root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
return
}
var blockRoot = [32]byte(root)
isFinalized := s.FinalizationFetcher.IsFinalized(r.Context(), blockRoot)
// Advance state forward to proposal slot
st, err = transition.ProcessSlots(r.Context(), st, proposalSlot)
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
Message: "could not process slots",
Code: http.StatusInternalServerError,
})
return
}
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
Message: "could not get expected withdrawals",
Code: http.StatusInternalServerError,
})
return
}
network.WriteJson(w, &ExpectedWithdrawalsResponse{
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
Data: buildExpectedWithdrawalsData(withdrawals),
})
}
func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*ExpectedWithdrawal {
data := make([]*ExpectedWithdrawal, len(withdrawals))
for i, withdrawal := range withdrawals {
data[i] = &ExpectedWithdrawal{
Address: hexutil.Encode(withdrawal.Address),
Amount: strconv.FormatUint(withdrawal.Amount, 10),
Index: strconv.FormatUint(withdrawal.Index, 10),
ValidatorIndex: strconv.FormatUint(uint64(withdrawal.ValidatorIndex), 10),
}
}
return data
}
func handleWrapError(err error, message string, code int) *network.DefaultErrorJson {
return &network.DefaultErrorJson{
Message: errors.Wrapf(err, message).Error(),
Code: code,
}
}
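
The sanity checks in ExpectedWithdrawals above reduce to three bounds on the proposal slot: it must be at or after the Capella fork start slot, strictly after the state slot, and strictly less than lookAheadLimit = SlotsPerEpoch * MaxSeedLookahead slots ahead of the state slot. A simplified, dependency-free sketch of that validation (plain uint64 slots instead of primitives.Slot, with the limits passed in as parameters) could read:

package main

import (
	"errors"
	"fmt"
)

// validateProposalSlot mirrors the bounds checked by ExpectedWithdrawals:
//   - proposalSlot >= capellaStart (withdrawals exist only from Capella on)
//   - proposalSlot >  stateSlot    (the block is built on top of the state)
//   - proposalSlot <  stateSlot + lookAheadLimit
// where lookAheadLimit = SlotsPerEpoch * MaxSeedLookahead in the handler above.
func validateProposalSlot(stateSlot, proposalSlot, capellaStart, lookAheadLimit uint64) error {
	if proposalSlot < capellaStart {
		return errors.New("expected withdrawals are not supported before Capella fork")
	}
	if proposalSlot <= stateSlot {
		return fmt.Errorf("proposal slot must be bigger than state slot. proposal slot: %d, state slot: %d", proposalSlot, stateSlot)
	}
	if stateSlot+lookAheadLimit <= proposalSlot {
		return fmt.Errorf("proposal slot cannot be >= %d slots ahead of state slot", lookAheadLimit)
	}
	return nil
}

func main() {
	// Example values: 32 slots per epoch and MaxSeedLookahead of 4 give a 128-slot window,
	// matching the "128 slots ahead" case exercised by the tests below.
	const lookAheadLimit = 32 * 4
	fmt.Println(validateProposalSlot(5000, 5001, 4096, lookAheadLimit)) // <nil>
	fmt.Println(validateProposalSlot(5000, 5200, 4096, lookAheadLimit)) // too far ahead
}
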

View File

@@ -1,210 +0,0 @@
package builder
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func TestExpectedWithdrawals_BadRequest(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
testCases := []struct {
name string
path string
urlParams map[string]string
state state.BeaconState
errorMessage string
}{
{
name: "no state_id url params",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot" +
strconv.FormatUint(uint64(currentSlot), 10),
urlParams: map[string]string{},
state: nil,
errorMessage: "state_id is required in URL params",
},
{
name: "invalid proposal slot value",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=aaa",
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "invalid proposal slot value",
},
{
name: "proposal slot < Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot)-1, 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "expected withdrawals are not supported before Capella fork",
},
{
name: "proposal slot == Capella start slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(capellaSlot), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot must be bigger than state slot",
},
{
name: "Proposal slot >= 128 slots ahead of state slot",
path: "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot=" +
strconv.FormatUint(uint64(currentSlot+128), 10),
urlParams: map[string]string{"state_id": "head"},
state: st,
errorMessage: "proposal slot cannot be >= 128 slots ahead of state slot",
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: testCase.state},
}
request := httptest.NewRequest("GET", testCase.path, nil)
request = mux.SetURLVars(request, testCase.urlParams)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, testCase.errorMessage, e.Message)
})
}
}
func TestExpectedWithdrawals(t *testing.T) {
st, err := util.NewBeaconStateCapella()
slotsAhead := 5000
require.NoError(t, err)
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
currentSlot := capellaSlot + primitives.Slot(slotsAhead)
require.NoError(t, st.SetSlot(currentSlot))
mockChainService := &mock.ChainService{Optimistic: true}
t.Run("get correct expected withdrawals", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.MaxValidatorsPerWithdrawalsSweep = 16
params.OverrideBeaconConfig(cfg)
// Update state with updated validator fields
valCount := 17
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
val := &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
validators = append(validators, val)
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
epoch := slots.ToEpoch(st.Slot())
// Fully withdrawable now with more than 0 balance
validators[5].WithdrawableEpoch = epoch
// Fully withdrawable now but 0 balance
validators[10].WithdrawableEpoch = epoch
balances[10] = 0
// Partially withdrawable now but fully withdrawable after 1 epoch
validators[14].WithdrawableEpoch = epoch + 1
balances[14] += params.BeaconConfig().MinDepositAmount
// Partially withdrawable
validators[15].WithdrawableEpoch = epoch + 2
balances[15] += params.BeaconConfig().MinDepositAmount
// Above sweep bound
validators[16].WithdrawableEpoch = epoch + 1
balances[16] += params.BeaconConfig().MinDepositAmount
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
inactivityScores := make([]uint64, valCount)
for i := range inactivityScores {
inactivityScores[i] = 10
}
require.NoError(t, st.SetInactivityScores(inactivityScores))
s := &Server{
FinalizationFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
Stater: &testutil.MockStater{BeaconState: st},
}
request := httptest.NewRequest(
"GET", "/eth/v1/builder/states/{state_id}/expected_withdrawals?proposal_slot="+
strconv.FormatUint(uint64(currentSlot+params.BeaconConfig().SlotsPerEpoch), 10), nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ExpectedWithdrawalsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
assert.Equal(t, 3, len(resp.Data))
expectedWithdrawal1 := &ExpectedWithdrawal{
Index: strconv.FormatUint(0, 10),
ValidatorIndex: strconv.FormatUint(5, 10),
Address: hexutil.Encode(validators[5].WithdrawalCredentials[12:]),
// Decreased due to epoch processing when state advanced forward
Amount: strconv.FormatUint(31998257885, 10),
}
expectedWithdrawal2 := &ExpectedWithdrawal{
Index: strconv.FormatUint(1, 10),
ValidatorIndex: strconv.FormatUint(14, 10),
Address: hexutil.Encode(validators[14].WithdrawalCredentials[12:]),
// MaxEffectiveBalance + MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(32998257885, 10),
}
expectedWithdrawal3 := &ExpectedWithdrawal{
Index: strconv.FormatUint(2, 10),
ValidatorIndex: strconv.FormatUint(15, 10),
Address: hexutil.Encode(validators[15].WithdrawalCredentials[12:]),
// MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(998257885, 10),
}
require.DeepEqual(t, expectedWithdrawal1, resp.Data[0])
require.DeepEqual(t, expectedWithdrawal2, resp.Data[1])
require.DeepEqual(t, expectedWithdrawal3, resp.Data[2])
})
}

View File

@@ -1,12 +0,0 @@
package builder
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
)
type Server struct {
FinalizationFetcher blockchain.FinalizationFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
Stater lookup.Stater
}

View File

@@ -1,14 +0,0 @@
package builder
type ExpectedWithdrawalsResponse struct {
Data []*ExpectedWithdrawal `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type ExpectedWithdrawal struct {
Address string `json:"address" hex:"true"`
Amount string `json:"amount"`
Index string `json:"index"`
ValidatorIndex string `json:"validator_index"`
}
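
Together with the handler above, these structs define the wire format of the expected-withdrawals route. As an illustration only, assuming a beacon node that still serves this endpoint is reachable on Prysm's default gateway port 3500 (both assumptions, not part of this diff), a client could query and decode it like so:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirrors the response structs declared above.
type expectedWithdrawal struct {
	Address        string `json:"address"`
	Amount         string `json:"amount"`
	Index          string `json:"index"`
	ValidatorIndex string `json:"validator_index"`
}

type expectedWithdrawalsResponse struct {
	Data                []*expectedWithdrawal `json:"data"`
	ExecutionOptimistic bool                  `json:"execution_optimistic"`
	Finalized           bool                  `json:"finalized"`
}

func main() {
	// state_id "head" and an explicit proposal_slot, as exercised by the tests above.
	// The base URL is an assumption; point it at wherever the beacon API is served.
	url := "http://localhost:3500/eth/v1/builder/states/head/expected_withdrawals?proposal_slot=5032"
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		fmt.Println("unexpected status:", resp.Status)
		return
	}
	var out expectedWithdrawalsResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, w := range out.Data {
		fmt.Printf("validator %s -> %s (%s Gwei)\n", w.ValidatorIndex, w.Address, w.Amount)
	}
}
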

View File

@@ -104,18 +104,6 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
}, nil
case version.Deneb:
protoState, err := migration.BeaconStateDenebToProto(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}
return &ethpbv2.BeaconStateResponseV2{
Version: ethpbv2.Version_DENEB,
Data: &ethpbv2.BeaconStateContainer{
State: &ethpbv2.BeaconStateContainer_DenebState{DenebState: protoState},
},
ExecutionOptimistic: isOptimistic,
}, nil
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
@@ -145,8 +133,6 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
ver = ethpbv2.Version_BELLATRIX
case version.Capella:
ver = ethpbv2.Version_CAPELLA
case version.Deneb:
ver = ethpbv2.Version_DENEB
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}

View File

@@ -97,24 +97,6 @@ func TestGetBeaconStateV2(t *testing.T) {
assert.NotNil(t, resp)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
t.Run("Deneb", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateDeneb(t, 1)
server := &Server{
Stater: &testutil.MockStater{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
FinalizationFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
})
t.Run("execution optimistic", func(t *testing.T) {
parentRoot := [32]byte{'a'}
blk := util.NewBeaconBlock()
@@ -274,25 +256,6 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
t.Run("Deneb", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateDeneb(t, 1)
sszState, err := fakeState.MarshalSSZ()
require.NoError(t, err)
server := &Server{
Stater: &testutil.MockStater{
BeaconState: fakeState,
},
}
resp, err := server.GetBeaconStateSSZV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
})
}
func TestListForkChoiceHeadsV2(t *testing.T) {

View File

@@ -317,7 +317,7 @@ func (s *Server) streamPayloadAttributes(stream ethpbservice.Events_StreamEvents
},
},
})
case version.Capella, version.Deneb:
case version.Capella:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
return err

Some files were not shown because too many files have changed in this diff.