Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: 56 commits in the range c6c9414d8b ... deneb-inte
Commits in this range (SHA1): 5d994498c9, 79bd9520ad, fb25916441, 7d52765ad2, f10b24590f, 54748c68b3, df49ee6db0, 7198a2a474, c64ccd3f7e, c6c7cd82fe, 0328aaeef9, 575f958cb1, 20c88ae812, 5f933e8276, fff84f9852, 91ba2a0fd6, 73428c51e0, 1f8a110598, 20aecca552, 39c755c516, 78f2e1399b, 39b18bef75, db48c5c806, 3d99f04380, 1a433e1378, 09af0f3bd4, 86b0dd90a8, e2b9738de0, c1ec5bac6a, 3842ee3b83, 800398c5ae, 030350baae, 74c30f25b8, 47db1149bc, d5ce9e6f82, d237526e83, e2bcfb681e, a5bb6a678a, d9ae7677e0, 3ca7bbc46f, 11c77b2c4d, 563c0dd9d5, 93b738bda7, 86ce5eee19, ef6e877d6f, d750a77cb1, 0bfe96f360, 21cb398f28, d94eaa5ec9, e146c7b7ca, d322bf4ece, bd5d6bc511, c74039658a, d65eeeec36, ecaaee90c8, e5433adbfd
WORKSPACE (10 changed lines)
@@ -213,7 +213,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.4.0-alpha.1"
consensus_spec_version = "v1.4.0-beta.1"

bls_test_version = "v0.1.1"

@@ -229,7 +229,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
sha256 = "24399b60ce3fbeb2311952d213dc3731b6dcb0f8881b016c283de5b518d2bbba",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -245,7 +245,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
sha256 = "8e656ee48d2e2ebc9cf9baedb81f27925bc625b3e3fbb2883444a08758a5884a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -261,7 +261,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
sha256 = "8bd137da6cc57a25383bfac5bc37e31265098145278bd8002b88e24c8b4718b9",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -276,7 +276,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
sha256 = "2bc1edb6e4a4f86c00317c04618a90b0ca29ee1eba833d0a64dd67fdd83fdbe3",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -11,12 +11,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
@@ -40,6 +40,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

@@ -1,14 +1,12 @@
package builder

import (
"math/big"

"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -24,6 +22,7 @@ type SignedBid interface {
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error)
Value() []byte
Pubkey() []byte
Version() int
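Only Deneb bids carry a blinded blobs bundle; the Bellatrix and Capella implementations below return an error from the new method, so callers are expected to gate on `Version()`. A minimal sketch of such a caller, assuming only the `Bid` interface above; `processBid` itself is a hypothetical helper, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	"github.com/prysmaticlabs/prysm/v4/runtime/version"
)

// processBid is a hypothetical consumer of the builder Bid interface.
// It always reads the execution header, but asks for the blinded blobs
// bundle only when the bid is a Deneb bid, since earlier forks return
// an error from BlindedBlobsBundle().
func processBid(bid builder.Bid) error {
	header, err := bid.Header()
	if err != nil {
		return fmt.Errorf("could not get execution header from bid: %w", err)
	}
	_ = header // e.g. check the parent hash and bid.Value() before signing the blinded block

	if bid.Version() == version.Deneb {
		bundle, err := bid.BlindedBlobsBundle()
		if err != nil {
			return fmt.Errorf("could not get blinded blobs bundle: %w", err)
		}
		fmt.Printf("bid carries %d blob commitments\n", len(bundle.GetKzgCommitments()))
	}
	return nil
}
```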
@@ -116,6 +115,11 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}

// BlindedBlobsBundle --
func (b builderBid) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
}

// Version --
func (b builderBid) Version() int {
return version.Bellatrix
@@ -162,8 +166,12 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}

// BlindedBlobsBundle --
func (b builderBidCapella) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
}

// Version --
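The bid value is the SSZ little-endian encoding of a uint256 Wei amount, which is why the removed Capella code reversed the bytes before handing them to `big.Int`; the replacement folds that into `blocks.PayloadValueToGwei`. A self-contained sketch of the underlying conversion, using only the standard library (the helper name `littleEndianWeiToGwei` is illustrative, and 1 Gwei = 10^9 Wei):

```go
package main

import (
	"fmt"
	"math/big"
)

// littleEndianWeiToGwei converts an SSZ-style little-endian uint256 Wei value
// into a Gwei amount, mirroring what the removed byte-order dance in
// builderBidCapella.Header used to do by hand.
func littleEndianWeiToGwei(value []byte) uint64 {
	// Reverse into big-endian so big.Int can parse it.
	be := make([]byte, len(value))
	for i := range value {
		be[len(value)-1-i] = value[i]
	}
	wei := new(big.Int).SetBytes(be)
	gwei := new(big.Int).Div(wei, big.NewInt(1_000_000_000))
	return gwei.Uint64()
}

func main() {
	// 1 ETH = 10^18 Wei, little-endian encoded on the wire.
	oneEthBE := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil).Bytes()
	le := make([]byte, len(oneEthBE))
	for i := range oneEthBE {
		le[len(oneEthBE)-1-i] = oneEthBE[i]
	}
	fmt.Println(littleEndianWeiToGwei(le)) // prints 1000000000 (Gwei)
}
```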
@@ -195,3 +203,90 @@ func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}

type builderBidDeneb struct {
p *ethpb.BuilderBidDeneb
}

// WrappedBuilderBidDeneb is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidDeneb(p *ethpb.BuilderBidDeneb) (Bid, error) {
w := builderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}

// Version --
func (b builderBidDeneb) Version() int {
return version.Deneb
}

// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
}

// Pubkey --
func (b builderBidDeneb) Pubkey() []byte {
return b.p.Pubkey
}

// IsNil --
func (b builderBidDeneb) IsNil() bool {
return b.p == nil
}

// HashTreeRoot --
func (b builderBidDeneb) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}

// HashTreeRootWith --
func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}

// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}

// BlindedBlobsBundle --
func (b builderBidDeneb) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return b.p.BlindedBlobsBundle, nil
}

type signedBuilderBidDeneb struct {
p *ethpb.SignedBuilderBidDeneb
}

// WrappedSignedBuilderBidDeneb is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidDeneb(p *ethpb.SignedBuilderBidDeneb) (SignedBid, error) {
w := signedBuilderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}

// Message --
func (b signedBuilderBidDeneb) Message() (Bid, error) {
return WrappedBuilderBidDeneb(b.p.Message)
}

// Signature --
func (b signedBuilderBidDeneb) Signature() []byte {
return b.p.Signature
}

// Version --
func (b signedBuilderBidDeneb) Version() int {
return version.Deneb
}

// IsNil --
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}

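A short sketch of how the new Deneb wrappers are meant to be used: wrap the protobuf once, then work through the `SignedBid`/`Bid` interfaces so calling code stays fork-agnostic. `inspectSignedBid` is a hypothetical helper, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// inspectSignedBid is a hypothetical helper: wrap the protobuf once, then use
// only the SignedBid/Bid interfaces. WrappedSignedBuilderBidDeneb returns
// consensus_types.ErrNilObjectWrapped when pb is nil.
func inspectSignedBid(pb *ethpb.SignedBuilderBidDeneb) error {
	sb, err := builder.WrappedSignedBuilderBidDeneb(pb)
	if err != nil {
		return err
	}
	bid, err := sb.Message()
	if err != nil {
		return err
	}
	bundle, err := bid.BlindedBlobsBundle()
	if err != nil {
		return err
	}
	fmt.Printf("version=%d value=%#x blobRoots=%d sig=%#x\n",
		bid.Version(), bid.Value(), len(bundle.GetBlobRoots()), sb.Signature())
	return nil
}
```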
@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
log "github.com/sirupsen/logrus"
@@ -86,7 +87,7 @@ type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
Status(ctx context.Context) error
}

@@ -220,6 +221,16 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
@@ -274,20 +285,20 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid

// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
if !sb.IsBlinded() {
return nil, errNotBlinded
return nil, nil, errNotBlinded
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}

versionOpt := func(r *http.Request) {
@@ -296,29 +307,33 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)

if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
return nil, nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayload(p)
payload, err := blocks.WrappedExecutionPayload(p)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}

versionOpt := func(r *http.Request) {
@@ -327,22 +342,61 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)

if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
return nil, nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, 0)
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Deneb:
psb, err := sb.PbBlindedDenebBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}

b := &ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{Block: psb, Blobs: blobs}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
}

versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
}
ep := &ExecPayloadResponseDeneb{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
}
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
return nil, nil, errors.New("not a deneb payload")
}
p, blobBundle, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, blobBundle, nil
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}

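With the widened signature, callers hand the signed blinded blob sidecars to the builder together with the blinded block and receive both the unblinded payload and, for Deneb, a blobs bundle. A hedged sketch of a call site; `submitBlinded` and its arguments are placeholders, and pre-Deneb callers would pass nil sidecars and get a nil bundle back:

```go
package main

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// submitBlinded is a hypothetical call site for the widened SubmitBlindedBlock:
// blinded blob sidecars go in next to the blinded block, and an optional blobs
// bundle comes back next to the unblinded payload.
func submitBlinded(ctx context.Context, c *builder.Client,
	blk interfaces.ReadOnlySignedBeaconBlock,
	blobs []*ethpb.SignedBlindedBlobSidecar) error {

	payload, bundle, err := c.SubmitBlindedBlock(ctx, blk, blobs)
	if err != nil {
		return fmt.Errorf("builder did not reveal the payload: %w", err)
	}
	revealed := 0
	if bundle != nil {
		revealed = len(bundle.Blobs)
	}
	fmt.Printf("unblinded payload %#x revealed with %d blobs\n", payload.BlockHash(), revealed)
	return nil
}
```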
@@ -12,7 +12,9 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -125,7 +127,6 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
var slot types.Slot = 23
|
||||
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
|
||||
t.Run("server error", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -236,6 +237,52 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDeneb)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
bundle, err := bid.BlindedBlobsBundle()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(bundle.BlobRoots) <= fieldparams.MaxBlobsPerBlock && len(bundle.BlobRoots) > 0, true)
|
||||
for i := range bundle.BlobRoots {
|
||||
require.Equal(t, len(bundle.BlobRoots[i]) == fieldparams.RootLength, true)
|
||||
}
|
||||
require.Equal(t, len(bundle.KzgCommitments) > 0, true)
|
||||
for i := range bundle.KzgCommitments {
|
||||
require.Equal(t, len(bundle.KzgCommitments[i]) == 48, true)
|
||||
}
|
||||
require.Equal(t, len(bundle.Proofs) > 0, true)
|
||||
for i := range bundle.Proofs {
|
||||
require.Equal(t, len(bundle.Proofs[i]) == 48, true)
|
||||
}
|
||||
})
|
||||
t.Run("unsupported version", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -277,7 +324,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
|
||||
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
@@ -303,7 +350,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
}
|
||||
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
|
||||
require.NoError(t, err)
|
||||
ep, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbb, nil)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
@@ -313,6 +360,40 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.Blobs)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
require.Equal(t, hexutil.Encode(blobBundle.Blobs[0]), hexutil.Encode(make([]byte, fieldparams.BlobLength)))
|
||||
require.Equal(t, hexutil.Encode(blobBundle.KzgCommitments[0]), "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
|
||||
require.Equal(t, hexutil.Encode(blobBundle.Proofs[0]), "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
|
||||
})
|
||||
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -330,13 +411,13 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
_, err = c.SubmitBlindedBlock(ctx, sbbb)
|
||||
_, _, err = c.SubmitBlindedBlock(ctx, sbbb, nil)
|
||||
require.ErrorContains(t, "not a bellatrix payload", err)
|
||||
})
|
||||
t.Run("not blinded", func(t *testing.T) {
|
||||
sbb, err := blocks.NewSignedBeaconBlock(ð.SignedBeaconBlockBellatrix{Block: ð.BeaconBlockBellatrix{Body: ð.BeaconBlockBodyBellatrix{}}})
|
||||
require.NoError(t, err)
|
||||
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
|
||||
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb, nil)
|
||||
require.ErrorIs(t, err, errNotBlinded)
|
||||
})
|
||||
}
|
||||
@@ -626,6 +707,168 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
}
|
||||
}
|
||||
|
||||
func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
|
||||
return ð.SignedBlindedBeaconBlockAndBlobsDeneb{
|
||||
Block: ð.SignedBlindedBeaconBlockDeneb{
|
||||
Block: ð.BlindedBeaconBlockDeneb{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyDeneb{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
AttesterSlashings: []*eth.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Attestations: []*eth.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ð.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 48),
|
||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlobGasUsed: 1,
|
||||
ExcessBlobGas: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Blobs: []*eth.SignedBlindedBlobSidecar{
|
||||
{
|
||||
Message: ð.BlindedBlobSidecar{
|
||||
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Index: 0,
|
||||
Slot: 1,
|
||||
BlockParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ProposerIndex: 1,
|
||||
BlobRoot: ezDecode(t, "0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"),
|
||||
KzgCommitment: ezDecode(t, "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"),
|
||||
KzgProof: ezDecode(t, "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestLogger(t *testing.T) {
|
||||
wo := WithObserver(&requestLogger{})
|
||||
c, err := NewClient("localhost:3500", wo)
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

@@ -40,8 +41,8 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}

// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
return nil, nil, nil
}

// Status --

@@ -8,6 +8,7 @@ import (

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
@@ -1107,6 +1108,295 @@ func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
})
}

// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseDeneb struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidDeneb `json:"message"`
} `json:"data"`
}

// ToProto creates a SignedBuilderBidDeneb Proto from ExecHeaderResponseDeneb.
func (ehr *ExecHeaderResponseDeneb) ToProto() (*eth.SignedBuilderBidDeneb, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidDeneb{
Message: bb,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
}, nil
}

// ToProto creates a BuilderBidDeneb Proto from BuilderBidDeneb.
func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
bundle, err := bb.BlindedBlobsBundle.ToProto()
if err != nil {
return nil, err
}
return &eth.BuilderBidDeneb{
Header: header,
BlindedBlobsBundle: bundle,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}

// BuilderBidDeneb is a field of ExecHeaderResponseDeneb.
type BuilderBidDeneb struct {
Header *ExecutionPayloadHeaderDeneb `json:"header"`
BlindedBlobsBundle *BlindedBlobsBundle `json:"blinded_blobs_bundle"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}

// BlindedBlobsBundle is a field of BuilderBidDeneb and represents the blinded blobs of the associated header.
type BlindedBlobsBundle struct {
KzgCommitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
BlobRoots []hexutil.Bytes `json:"blob_roots"`
}

// ToProto creates a BlindedBlobsBundle Proto from BlindedBlobsBundle.
func (r *BlindedBlobsBundle) ToProto() (*v1.BlindedBlobsBundle, error) {
kzg := make([][]byte, len(r.KzgCommitments))
for i := range kzg {
kzg[i] = bytesutil.SafeCopyBytes(r.KzgCommitments[i])
}

proofs := make([][]byte, len(r.Proofs))
for i := range proofs {
proofs[i] = bytesutil.SafeCopyBytes(r.Proofs[i])
}

blobRoots := make([][]byte, len(r.BlobRoots))
for i := range blobRoots {
blobRoots[i] = bytesutil.SafeCopyBytes(r.BlobRoots[i])
}

return &v1.BlindedBlobsBundle{
KzgCommitments: kzg,
Proofs: proofs,
BlobRoots: blobRoots,
}, nil
}

// ExecutionPayloadHeaderDeneb a field part of the BuilderBidDeneb.
type ExecutionPayloadHeaderDeneb struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
WithdrawalsRoot hexutil.Bytes `json:"withdrawals_root"`
BlobGasUsed Uint64String `json:"blob_gas_used"` // new in deneb
ExcessBlobGas Uint64String `json:"excess_blob_gas"` // new in deneb
*v1.ExecutionPayloadHeaderDeneb
}

// MarshalJSON returns a JSON byte array representing the ExecutionPayloadHeaderDeneb struct.
func (h *ExecutionPayloadHeaderDeneb) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderDeneb
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderDeneb.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeaderDeneb.ParentHash,
FeeRecipient: h.ExecutionPayloadHeaderDeneb.FeeRecipient,
StateRoot: h.ExecutionPayloadHeaderDeneb.StateRoot,
ReceiptsRoot: h.ExecutionPayloadHeaderDeneb.ReceiptsRoot,
LogsBloom: h.ExecutionPayloadHeaderDeneb.LogsBloom,
PrevRandao: h.ExecutionPayloadHeaderDeneb.PrevRandao,
BlockNumber: Uint64String(h.ExecutionPayloadHeaderDeneb.BlockNumber),
GasLimit: Uint64String(h.ExecutionPayloadHeaderDeneb.GasLimit),
GasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeaderDeneb.Timestamp),
ExtraData: h.ExecutionPayloadHeaderDeneb.ExtraData,
BaseFeePerGas: baseFeePerGas,
BlockHash: h.ExecutionPayloadHeaderDeneb.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeaderDeneb.TransactionsRoot,
WithdrawalsRoot: h.ExecutionPayloadHeaderDeneb.WithdrawalsRoot,
BlobGasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.BlobGasUsed),
ExcessBlobGas: Uint64String(h.ExecutionPayloadHeaderDeneb.ExcessBlobGas),
})
}

// UnmarshalJSON takes in a byte array and unmarshals the value into ExecutionPayloadHeaderDeneb.
func (h *ExecutionPayloadHeaderDeneb) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderDeneb
uc := &UnmarshalCaller{}
if err := json.Unmarshal(b, uc); err != nil {
return err
}
ep := ExecutionPayloadHeaderDeneb(*uc)
*h = ep
var err error
h.ExecutionPayloadHeaderDeneb, err = h.ToProto()
return err
}

// ToProto returns a ExecutionPayloadHeaderDeneb Proto object.
func (h *ExecutionPayloadHeaderDeneb) ToProto() (*v1.ExecutionPayloadHeaderDeneb, error) {
return &v1.ExecutionPayloadHeaderDeneb{
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
BlobGasUsed: uint64(h.BlobGasUsed),
ExcessBlobGas: uint64(h.ExcessBlobGas),
}, nil
}

// ExecPayloadResponseDeneb the response to the build API /eth/v1/builder/blinded_blocks that includes the version, execution payload object , and blobs bundle object.
type ExecPayloadResponseDeneb struct {
Version string `json:"version"`
Data *ExecutionPayloadDenebAndBlobsBundle `json:"data"`
}

// ExecutionPayloadDenebAndBlobsBundle the main field used in ExecPayloadResponseDeneb.
type ExecutionPayloadDenebAndBlobsBundle struct {
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BlobsBundle *BlobsBundle `json:"blobs_bundle"`
}

// ExecutionPayloadDeneb is a field used in ExecutionPayloadDenebAndBlobsBundle.
type ExecutionPayloadDeneb struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
Withdrawals []Withdrawal `json:"withdrawals"`
BlobGasUsed Uint64String `json:"blob_gas_used"` // new in deneb
ExcessBlobGas Uint64String `json:"excess_blob_gas"` // new in deneb
}

// BlobsBundle is a field in ExecutionPayloadDenebAndBlobsBundle.
type BlobsBundle struct {
Commitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
Blobs []hexutil.Bytes `json:"blobs"`
}

// ToProto returns a BlobsBundle Proto.
func (b BlobsBundle) ToProto() (*v1.BlobsBundle, error) {
commitments := make([][]byte, len(b.Commitments))
for i := range b.Commitments {
if len(b.Commitments[i]) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("commitment length %d is not %d", len(b.Commitments[i]), fieldparams.BLSPubkeyLength)
}
commitments[i] = bytesutil.SafeCopyBytes(b.Commitments[i])
}
proofs := make([][]byte, len(b.Proofs))
for i := range b.Proofs {
if len(b.Proofs[i]) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("proof length %d is not %d", len(b.Proofs[i]), fieldparams.BLSPubkeyLength)
}
proofs[i] = bytesutil.SafeCopyBytes(b.Proofs[i])
}
if len(b.Blobs) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("blobs length %d is more than max %d", len(b.Blobs), fieldparams.MaxBlobsPerBlock)
}
blobs := make([][]byte, len(b.Blobs))
for i := range b.Blobs {
if len(b.Blobs[i]) != fieldparams.BlobLength {
return nil, fmt.Errorf("blob length %d is not %d", len(b.Blobs[i]), fieldparams.BlobLength)
}
blobs[i] = bytesutil.SafeCopyBytes(b.Blobs[i])
}
return &v1.BlobsBundle{
KzgCommitments: commitments,
Proofs: proofs,
Blobs: blobs,
}, nil
}

// ToProto returns ExecutionPayloadDeneb Proto and BlobsBundle Proto separately.
func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.BlobsBundle, error) {
if r.Data == nil {
return nil, nil, errors.New("data field in response is empty")
}
payload, err := r.Data.ExecutionPayload.ToProto()
if err != nil {
return nil, nil, err
}
bundle, err := r.Data.BlobsBundle.ToProto()
if err != nil {
return nil, nil, err
}
return payload, bundle, nil
}

// ToProto returns the ExecutionPayloadDeneb Proto.
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: bytesutil.SafeCopyBytes(w.Address),
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadDeneb{
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
BlobGasUsed: uint64(p.BlobGasUsed),
ExcessBlobGas: uint64(p.ExcessBlobGas),
}, nil
}

// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`

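These Deneb types mirror the builder-spec JSON: `UnmarshalJSON` fills the hex and decimal-string fields, and `ToProto` copies them into the engine/prysm protobufs the rest of the client consumes. A small sketch of that decode-and-convert path for a getHeader response body, assuming builder-API-shaped JSON; `decodeDenebHeader` is a hypothetical helper:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
)

// decodeDenebHeader is a hypothetical helper tracing the same path GetHeader
// takes for a Deneb response: JSON -> ExecHeaderResponseDeneb -> protobuf ->
// SignedBid interface.
func decodeDenebHeader(rawHeaderJSON []byte) (builder.SignedBid, error) {
	hr := &builder.ExecHeaderResponseDeneb{}
	if err := json.Unmarshal(rawHeaderJSON, hr); err != nil {
		return nil, fmt.Errorf("malformed builder header response: %w", err)
	}
	pb, err := hr.ToProto() // copies value, pubkey, header and the blinded blobs bundle
	if err != nil {
		return nil, err
	}
	return builder.WrappedSignedBuilderBidDeneb(pb)
}
```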
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
@@ -116,6 +117,47 @@ var testExampleHeaderResponseCapella = `{
|
||||
}
|
||||
}`
|
||||
|
||||
var testExampleHeaderResponseDeneb = `{
|
||||
"version": "deneb",
|
||||
"data": {
|
||||
"message": {
|
||||
"header": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"blob_gas_used": "1",
|
||||
"excess_blob_gas": "2"
|
||||
},
|
||||
"blinded_blobs_bundle": {
|
||||
"commitments": [
|
||||
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
|
||||
],
|
||||
"proofs": [
|
||||
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
|
||||
],
|
||||
"blob_roots": [
|
||||
"0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"
|
||||
]
|
||||
},
|
||||
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
}`
|
||||
|
||||
var testExampleHeaderResponseUnknownVersion = `{
|
||||
"version": "bad",
|
||||
"data": {
|
||||
@@ -518,6 +560,51 @@ var testExampleExecutionPayloadCapella = `{
|
||||
}
|
||||
}`
|
||||
|
||||
var testExampleExecutionPayloadDeneb = fmt.Sprintf(`{
|
||||
"version": "deneb",
|
||||
"data": {
|
||||
"execution_payload":{
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions": [
|
||||
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||
],
|
||||
"withdrawals": [
|
||||
{
|
||||
"index": "1",
|
||||
"validator_index": "1",
|
||||
"address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
|
||||
"amount": "1"
|
||||
}
|
||||
],
|
||||
"blob_gas_used": "2",
|
||||
"excess_blob_gas": "3"
|
||||
},
|
||||
"blobs_bundle": {
|
||||
"commitments": [
|
||||
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
|
||||
],
|
||||
"proofs": [
|
||||
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
|
||||
],
|
||||
"blobs": [
|
||||
"%s"
|
||||
]
|
||||
}
|
||||
}
|
||||
}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))
|
||||
|
||||
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
|
||||
epr := &ExecPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
|
||||
@@ -689,6 +776,107 @@ func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
|
||||
assert.Equal(t, uint64(1), w.Amount.Uint64())
|
||||
}
|
||||
|
||||
func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
|
||||
epr := &ExecPayloadResponseDeneb{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
|
||||
cases := []struct {
|
||||
expected string
|
||||
actual string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.ParentHash),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
|
||||
},
|
||||
{
|
||||
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.FeeRecipient),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.StateRoot),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.ReceiptsRoot),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
|
||||
},
|
||||
{
|
||||
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.LogsBloom),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.PrevRandao),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlockNumber),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasLimit),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasUsed),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.Timestamp),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.ExtraData),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
|
||||
},
|
||||
{
|
||||
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BaseFeePerGas),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExecutionPayload.BlockHash),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
|
||||
},
|
||||
{
|
||||
expected: "2",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlobGasUsed),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlobGasUsed",
|
||||
},
|
||||
{
|
||||
expected: "3",
|
||||
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessBlobGas),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ExcessBlobGas",
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
|
||||
}
|
||||
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Transactions))
|
||||
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||
require.Equal(t, txHash, hexutil.Encode(epr.Data.ExecutionPayload.Transactions[0]))
|
||||
|
||||
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Withdrawals))
|
||||
w := epr.Data.ExecutionPayload.Withdrawals[0]
|
||||
assert.Equal(t, uint64(1), w.Index.Uint64())
|
||||
assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
|
||||
assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
|
||||
assert.Equal(t, uint64(1), w.Amount.Uint64())
|
||||
assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.BlobGasUsed))
|
||||
assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessBlobGas))
|
||||
}
|
||||
|
||||
func TestExecutionPayloadResponseToProto(t *testing.T) {
|
||||
hr := &ExecPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
|
||||
@@ -796,6 +984,85 @@ func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
|
||||
hr := &ExecPayloadResponseDeneb{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), hr))
|
||||
p, blobsBundle, err := hr.ToProto()
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||
require.NoError(t, err)
|
||||
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
|
||||
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
|
||||
require.NoError(t, err)
|
||||
txList := [][]byte{tx}
|
||||
address, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943")
|
||||
require.NoError(t, err)
|
||||
|
||||
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.NoError(t, err)
|
||||
expected := &v1.ExecutionPayloadDeneb{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: feeRecipient,
|
||||
StateRoot: stateRoot,
|
||||
ReceiptsRoot: receiptsRoot,
|
||||
LogsBloom: logsBloom,
|
||||
PrevRandao: prevRandao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: extraData,
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: blockHash,
|
||||
Transactions: txList,
|
||||
Withdrawals: []*v1.Withdrawal{
|
||||
{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: address,
|
||||
Amount: 1,
|
||||
},
|
||||
},
|
||||
BlobGasUsed: 2,
|
||||
ExcessBlobGas: 3,
|
||||
}
|
||||
require.DeepEqual(t, expected, p)
|
||||
commitment, err := hexutil.Decode("0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
|
||||
require.NoError(t, err)
|
||||
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedBlobs := &v1.BlobsBundle{
|
||||
KzgCommitments: [][]byte{
|
||||
commitment,
|
||||
},
|
||||
Proofs: [][]byte{
|
||||
proof,
|
||||
},
|
||||
Blobs: [][]byte{
|
||||
make([]byte, fieldparams.BlobLength),
|
||||
},
|
||||
}
|
||||
|
||||
require.DeepEqual(t, blobsBundle, expectedBlobs)
|
||||
|
||||
}
|
||||
|
||||
func pbEth1Data() *eth.Eth1Data {
|
||||
return ð.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -1026,6 +1293,30 @@ func pbExecutionPayloadHeaderCapella(t *testing.T) *v1.ExecutionPayloadHeaderCap
|
||||
}
|
||||
}
|
||||
|
||||
func pbExecutionPayloadHeaderDeneb(t *testing.T) *v1.ExecutionPayloadHeaderDeneb {
|
||||
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.NoError(t, err)
|
||||
return &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlobGasUsed: 1,
|
||||
ExcessBlobGas: 2,
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
|
||||
h := &ExecutionPayloadHeader{
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
|
||||
@@ -1046,6 +1337,16 @@ func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderDeneb_MarshalJSON(t *testing.T) {
|
||||
h := &ExecutionPayloadHeaderDeneb{
|
||||
ExecutionPayloadHeaderDeneb: pbExecutionPayloadHeaderDeneb(t),
|
||||
}
|
||||
b, err := json.Marshal(h)
|
||||
require.NoError(t, err)
|
||||
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","blob_gas_used":"1","excess_blob_gas":"2"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
var testBuilderBid = `{
|
||||
"version":"bellatrix",
|
||||
"data":{
|
||||
|
||||
@@ -21,6 +21,7 @@ go_library(
|
||||
"process_block.go",
|
||||
"process_block_helpers.go",
|
||||
"receive_attestation.go",
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"service.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
@@ -35,6 +36,7 @@ go_library(
|
||||
deps = [
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
|
||||
@@ -17,8 +17,6 @@ var (
|
||||
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
|
||||
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
|
||||
errBlockDoesNotExist = errors.New("could not find block in DB")
|
||||
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
|
||||
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
|
||||
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
|
||||
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
|
||||
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
|
||||
|
||||
@@ -2,8 +2,10 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
@@ -26,6 +28,8 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
const blobCommitmentVersionKZG uint8 = 0x01
|
||||
|
||||
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
|
||||
|
||||
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
|
||||
@@ -210,7 +214,19 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
if err != nil {
|
||||
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
|
||||
}
|
||||
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
|
||||
var lastValidHash []byte
|
||||
if blk.Version() >= version.Deneb {
|
||||
var versionedHashes []common.Hash
|
||||
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
|
||||
}
|
||||
pr := common.Hash(blk.Block().ParentRoot())
|
||||
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
|
||||
} else {
|
||||
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
|
||||
}
|
||||
switch err {
|
||||
case nil:
|
||||
newPayloadValidNodeCount.Inc()
|
||||
@@ -310,6 +326,23 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
|
||||
var attr payloadattribute.Attributer
|
||||
switch st.Version() {
|
||||
case version.Deneb:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return false, emptyAttri, 0
|
||||
}
|
||||
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
||||
Withdrawals: withdrawals,
|
||||
ParentBeaconBlockRoot: headRoot,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return false, emptyAttri, 0
|
||||
}
|
||||
case version.Capella:
|
||||
withdrawals, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
@@ -344,19 +377,36 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
return true, attr, proposerID
|
||||
}
|
||||
|
||||
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
|
||||
// removeInvalidBlockAndState removes the invalid block, its blob sidecar, and its corresponding state from the cache and DB.
|
||||
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
|
||||
for _, root := range blkRoots {
|
||||
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete block also deletes the state as well.
|
||||
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
|
||||
// TODO(10487): If a caller requests to delete a root that's justified and finalized, we should gracefully shut down.
// This is an irreparable condition: it would mean a justified or finalized block has become invalid.
|
||||
return err
|
||||
}
|
||||
// No op if the sidecar does not exist.
|
||||
if err := s.cfg.BeaconDB.DeleteBlobSidecar(ctx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([]common.Hash, error) {
|
||||
commitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
|
||||
}
|
||||
|
||||
versionedHashes := make([]common.Hash, len(commitments))
|
||||
for i, commitment := range commitments {
|
||||
versionedHashes[i] = sha256.Sum256(commitment)
|
||||
versionedHashes[i][0] = blobCommitmentVersionKZG
|
||||
}
|
||||
return versionedHashes, nil
|
||||
}
|
||||
|
||||
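kzgCommitmentsToVersionedHashes above follows the EIP-4844 versioned-hash rule: SHA-256 of the KZG commitment with the first byte overwritten by blobCommitmentVersionKZG (0x01). A standalone sketch of the same construction, for reference only:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// kzgToVersionedHash mirrors the per-commitment step of
// kzgCommitmentsToVersionedHashes: sha256(commitment) with the leading byte
// replaced by the 0x01 version prefix.
func kzgToVersionedHash(commitment []byte) [32]byte {
    h := sha256.Sum256(commitment)
    h[0] = 0x01
    return h
}

func main() {
    c := make([]byte, 48) // zeroed 48-byte commitment, purely illustrative
    vh := kzgToVersionedHash(c)
    fmt.Println("0x" + hex.EncodeToString(vh[:]))
}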
@@ -860,6 +860,48 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
|
||||
require.Equal(t, 0, len(a))
|
||||
}
|
||||
|
||||
func Test_GetPayloadAttributeDeneb(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
|
||||
require.Equal(t, false, hasPayload)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), vId)
|
||||
|
||||
// Cache hit, advance state, no fee recipient
|
||||
suggestedVid := primitives.ValidatorIndex(1)
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
|
||||
hook := logTest.NewGlobal()
|
||||
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, true, hasPayload)
|
||||
require.Equal(t, suggestedVid, vId)
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
|
||||
a, err := attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
|
||||
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, true, hasPayload)
|
||||
require.Equal(t, suggestedVid, vId)
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(a))
|
||||
|
||||
attrV3, err := attr.PbV3()
|
||||
require.NoError(t, err)
|
||||
hr := service.headRoot()
|
||||
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
|
||||
|
||||
}
|
||||
|
||||
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
@@ -1045,3 +1087,28 @@ func TestService_getPayloadHash(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, [32]byte{'a'}, h)
|
||||
}
|
||||
|
||||
func TestKZGCommitmentToVersionedHashes(t *testing.T) {
|
||||
kzg1 := make([]byte, 96)
|
||||
kzg1[10] = 'a'
|
||||
kzg2 := make([]byte, 96)
|
||||
kzg2[1] = 'b'
|
||||
commitments := [][]byte{kzg1, kzg2}
|
||||
|
||||
blk := ðpb.SignedBeaconBlockDeneb{
|
||||
Block: ðpb.BeaconBlockDeneb{
|
||||
Body: ðpb.BeaconBlockBodyDeneb{
|
||||
BlobKzgCommitments: commitments,
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
vhs, err := kzgCommitmentsToVersionedHashes(b.Block().Body())
|
||||
require.NoError(t, err)
|
||||
vh0 := "0x01cf2315c97658a7ed54ada181765e23b3fadb5150fab39509f631c0b9af4566"
|
||||
vh1 := "0x01e27ce28e527eb07196b31af0f5fa1882ace701a682022ab779f816ac39d47e"
|
||||
require.Equal(t, 2, len(vhs))
|
||||
require.Equal(t, vhs[0].String(), vh0)
|
||||
require.Equal(t, vhs[1].String(), vh1)
|
||||
}
|
||||
|
||||
beacon-chain/blockchain/kzg/BUILD.bazel (new file, 31 lines)
@@ -0,0 +1,31 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "trusted_setup.go",
        "validation.go",
    ],
    embedsrcs = ["trusted_setup.json"],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg",
    visibility = ["//visibility:public"],
    deps = [
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "trusted_setup_test.go",
        "validation_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
    ],
)
beacon-chain/blockchain/kzg/trusted_setup.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package kzg

import (
    _ "embed"
    "encoding/json"

    GoKZG "github.com/crate-crypto/go-kzg-4844"
    "github.com/pkg/errors"
)

var (
    //go:embed trusted_setup.json
    embeddedTrustedSetup []byte // 1.2Mb
    kzgContext           *GoKZG.Context
)

func Start() error {
    parsedSetup := GoKZG.JSONTrustedSetup{}
    err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
    if err != nil {
        return errors.Wrap(err, "could not parse trusted setup JSON")
    }
    kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
    if err != nil {
        return errors.Wrap(err, "could not initialize go-kzg context")
    }
    return nil
}
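For context, NewService further down in this diff calls kzg.Start() during service construction. A minimal standalone usage sketch, assuming only the exported Start shown above:

package main

import (
    "log"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
)

func main() {
    // Start parses the embedded trusted_setup.json and initializes the
    // package-level go-kzg context; it must succeed before any blob
    // proof verification is attempted.
    if err := kzg.Start(); err != nil {
        log.Fatalf("could not initialize KZG trusted setup: %v", err)
    }
}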
beacon-chain/blockchain/kzg/trusted_setup.json (new file, 1 line; diff suppressed because one or more lines are too long)
beacon-chain/blockchain/kzg/trusted_setup_test.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package kzg

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestStart(t *testing.T) {
    require.NoError(t, Start())
    require.NotNil(t, kzgContext)
}
beacon-chain/blockchain/kzg/validation.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package kzg

import (
    "fmt"

    GoKZG "github.com/crate-crypto/go-kzg-4844"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// IsDataAvailable checks that
// - all blobs in the block are available
// - the number of KZG commitments matches the number of blobs in the block
// - the number of proofs matches the number of blobs
// - the proofs verify against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
    if len(commitments) != len(sidecars) {
        return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
            len(commitments), len(sidecars))
    }
    if len(commitments) == 0 {
        return nil
    }
    blobs := make([]GoKZG.Blob, len(commitments))
    proofs := make([]GoKZG.KZGProof, len(commitments))
    cmts := make([]GoKZG.KZGCommitment, len(commitments))
    for i, sidecar := range sidecars {
        blobs[i] = bytesToBlob(sidecar.Blob)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
        cmts[i] = bytesToCommitment(commitments[i])
    }
    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
    copy(ret[:], blob)
    return
}

func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
    copy(ret[:], commitment)
    return
}

func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
    copy(ret[:], proof)
    return
}
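A hedged sketch of how a caller might use IsDataAvailable; the wrapper below is illustrative, assumes Start has already initialized the KZG context, and real sidecars come from gossip or the database:

package kzgexample // illustrative package, not part of Prysm

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// verifyBlockBlobs pairs a block's KZG commitments with the sidecars fetched
// for its root and returns an error if the batch proof check fails.
func verifyBlockBlobs(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
    if err := kzg.IsDataAvailable(commitments, sidecars); err != nil {
        return fmt.Errorf("block is not data-available: %w", err)
    }
    return nil
}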
beacon-chain/blockchain/kzg/validation_test.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package kzg

import (
    "testing"

    GoKZG "github.com/crate-crypto/go-kzg-4844"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestIsDataAvailable(t *testing.T) {
    sidecars := make([]*ethpb.BlobSidecar, 0)
    commitments := make([][]byte, 0)
    require.NoError(t, IsDataAvailable(commitments, sidecars))
}

func TestBytesToAny(t *testing.T) {
    bytes := []byte{0x01, 0x02}
    blob := GoKZG.Blob{0x01, 0x02}
    commitment := GoKZG.KZGCommitment{0x01, 0x02}
    proof := GoKZG.KZGProof{0x01, 0x02}
    require.DeepEqual(t, blob, bytesToBlob(bytes))
    require.DeepEqual(t, commitment, bytesToCommitment(bytes))
    require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}
@@ -61,6 +61,14 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
txsPerSlotCount.Set(float64(len(txs)))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Deneb {
|
||||
kzgs, err := b.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("blobCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
@@ -73,7 +81,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
||||
level := log.Logger.GetLevel()
|
||||
if level >= logrus.DebugLevel {
|
||||
parentRoot := block.ParentRoot()
|
||||
log.WithFields(logrus.Fields{
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
@@ -87,7 +95,8 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
|
||||
"deposits": len(block.Body().Deposits()),
|
||||
}).Debug("Synced new block")
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -160,19 +162,14 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
return preStateVersion, preStateHeader, nil
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
blockRoots [][32]byte) error {
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
if len(blks) == 0 {
|
||||
return errors.New("no blocks provided")
|
||||
}
|
||||
|
||||
if len(blks) != len(blockRoots) {
|
||||
return errWrongBlockCount
|
||||
}
|
||||
|
||||
if err := consensusblocks.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return invalidBlock{error: err}
|
||||
}
|
||||
@@ -222,7 +219,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
boundaries[blockRoots[i]] = preState.Copy()
|
||||
boundaries[b.Root()] = preState.Copy()
|
||||
}
|
||||
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
|
||||
fCheckpoints[i] = preState.FinalizedCheckpoint()
|
||||
@@ -255,11 +252,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
|
||||
var isValidPayload bool
|
||||
for i, b := range blks {
|
||||
root := b.Root()
|
||||
isValidPayload, err = s.notifyNewPayload(ctx,
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
return s.handleInvalidExecutionError(ctx, err, blockRoots[i], b.Block().ParentRoot())
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
@@ -271,13 +269,13 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
pendingNodes[len(blks)-i-1] = args
|
||||
if err := s.saveInitSyncBlock(ctx, blockRoots[i], b); err != nil {
|
||||
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: blockRoots[i][:],
|
||||
Root: root[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -301,8 +299,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
|
||||
return err
|
||||
}
|
||||
}
|
||||
lastB := blks[len(blks)-1]
|
||||
lastBR := lastB.Root()
|
||||
// Also saves the last post state, which is to be used as the pre state for the next batch.
|
||||
lastBR := blockRoots[len(blks)-1]
|
||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -320,7 +319,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
|
||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||
}
|
||||
}
|
||||
lastB := blks[len(blks)-1]
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
headState: preState,
|
||||
headRoot: lastBR,
|
||||
@@ -501,6 +499,78 @@ func (s *Service) runLateBlockTasks() {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if slots.ToEpoch(block.Slot())+params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest > primitives.Epoch(s.CurrentSlot()) {
|
||||
return nil
|
||||
}
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get KZG commitments")
|
||||
}
|
||||
existingBlobs := len(kzgCommitments)
|
||||
if existingBlobs == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read first from db in case we have the blobs
|
||||
s.blobNotifier.Lock()
|
||||
var nc *blobNotifierChan
|
||||
var ok bool
|
||||
nc, ok = s.blobNotifier.chanForRoot[root]
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err == nil {
|
||||
if len(sidecars) >= existingBlobs {
|
||||
delete(s.blobNotifier.chanForRoot, root)
|
||||
s.blobNotifier.Unlock()
|
||||
return kzg.IsDataAvailable(kzgCommitments, sidecars)
|
||||
}
|
||||
}
|
||||
// Create the channel if it did not already exist; the index map will be
// created later anyway.
|
||||
if !ok {
|
||||
nc = &blobNotifierChan{channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
|
||||
s.blobNotifier.chanForRoot[root] = nc
|
||||
}
|
||||
// We have more commitments in the block than blobs in the database,
// so we sync the channel indices with the sidecars.
|
||||
nc.indices = make(map[uint64]struct{})
|
||||
for _, sidecar := range sidecars {
|
||||
nc.indices[sidecar.Index] = struct{}{}
|
||||
}
|
||||
s.blobNotifier.Unlock()
|
||||
channelWrites := len(sidecars)
|
||||
for {
|
||||
select {
|
||||
case <-nc.channel:
|
||||
channelWrites++
|
||||
if channelWrites == existingBlobs {
|
||||
s.blobNotifier.Lock()
|
||||
delete(s.blobNotifier.chanForRoot, root)
|
||||
s.blobNotifier.Unlock()
|
||||
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob sidecars")
|
||||
}
|
||||
return kzg.IsDataAvailable(kzgCommitments, sidecars)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(err, "context deadline waiting for blob sidecars")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
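The availability check above only applies inside the blob retention window (MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS). Below is an illustrative sketch of that predicate with the slot-to-epoch conversion written out on both sides; the helper name and package are hypothetical and not part of Prysm:

package blockchainexample // illustrative

import (
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
)

// withinBlobRetentionWindow reports whether sidecars for a block at blockSlot
// are still expected to be retrievable at currentSlot, i.e. whether the block
// is at most minEpochs epochs old. Sketch only.
func withinBlobRetentionWindow(blockSlot, currentSlot primitives.Slot, minEpochs primitives.Epoch) bool {
    return slots.ToEpoch(blockSlot)+minEpochs >= slots.ToEpoch(currentSlot)
}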
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||
// related to late blocks. It emits a MissedSlot state feed event.
|
||||
// It calls FCU and sets the right attributes if we are proposing the next slot.
|
||||
|
||||
@@ -49,8 +49,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []interfaces.ReadOnlySignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var blks []consensusblocks.ROBlock
|
||||
for i := 0; i < 97; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
@@ -63,16 +62,15 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, wsb)
|
||||
blkRoots = append(blkRoots, root)
|
||||
rwsb, err := consensusblocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
require.ErrorIs(t, errWrongBlockCount, err)
|
||||
err = service.onBlockBatch(ctx, blks, blkRoots)
|
||||
err := service.onBlockBatch(ctx, blks)
|
||||
require.NoError(t, err)
|
||||
jcp := service.CurrentJustifiedCheckpt()
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
require.Equal(t, blkRoots[63], jroot)
|
||||
require.Equal(t, blks[63].Root(), jroot)
|
||||
require.Equal(t, primitives.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
@@ -84,8 +82,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []interfaces.ReadOnlySignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var blks []consensusblocks.ROBlock
|
||||
blkCount := 4
|
||||
for i := 0; i <= blkCount; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
@@ -94,13 +91,12 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
rwsb, err := consensusblocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
|
||||
blks = append(blks, wsb)
|
||||
blkRoots = append(blkRoots, root)
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks))
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
@@ -1932,8 +1928,10 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
rwsb, err := consensusblocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
// We use onBlockBatch here because the valid chain is missing in forkchoice
|
||||
require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}))
|
||||
// Check that the head is now VALID and the node is not optimistic
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
headRoot, err = service.HeadRoot(ctx)
|
||||
|
||||
beacon-chain/blockchain/receive_blob.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package blockchain

import fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"

// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
// for the block root `root` is ready in the database.
func (s *Service) SendNewBlobEvent(root [32]byte, index uint64) {
    s.blobNotifier.Lock()
    nc, ok := s.blobNotifier.chanForRoot[root]
    if !ok {
        nc = &blobNotifierChan{indices: make(map[uint64]struct{}), channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
        s.blobNotifier.chanForRoot[root] = nc
    }
    _, ok = nc.indices[index]
    if ok {
        s.blobNotifier.Unlock()
        return
    }
    nc.indices[index] = struct{}{}
    s.blobNotifier.Unlock()
    nc.channel <- struct{}{}
}
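SendNewBlobEvent and the isDataAvailable loop shown earlier cooperate through a per-root buffered channel: each newly stored sidecar index contributes at most one write, and the availability check counts writes until it has seen as many as the block has commitments. A stripped-down sketch of that pattern follows; the names are illustrative, and the real code guards the index map with the blobNotifier mutex:

package main

import "fmt"

const maxBlobsPerBlock = 6 // illustrative; the real bound is fieldparams.MaxBlobsPerBlock

type blobWaiter struct {
    indices map[uint64]struct{}
    signals chan struct{}
}

func newBlobWaiter() *blobWaiter {
    return &blobWaiter{indices: make(map[uint64]struct{}), signals: make(chan struct{}, maxBlobsPerBlock)}
}

// notify records a stored sidecar index; duplicates are dropped so each index
// contributes exactly one channel write, mirroring SendNewBlobEvent.
func (w *blobWaiter) notify(index uint64) {
    if _, seen := w.indices[index]; seen {
        return
    }
    w.indices[index] = struct{}{}
    w.signals <- struct{}{}
}

// waitFor blocks until `expected` distinct sidecars have been signalled,
// mirroring the receive loop in isDataAvailable.
func (w *blobWaiter) waitFor(expected int) {
    for i := 0; i < expected; i++ {
        <-w.signals
    }
}

func main() {
    w := newBlobWaiter()
    go func() {
        w.notify(0)
        w.notify(0) // duplicate index, ignored
        w.notify(1)
    }()
    w.waitFor(2)
    fmt.Println("all expected sidecars observed")
}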
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -32,8 +33,15 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
}
|
||||
|
||||
// BlobReceiver interface defines the methods of chain service for receiving new
|
||||
// blobs
|
||||
type BlobReceiver interface {
|
||||
SendNewBlobEvent([32]byte, uint64)
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
@@ -88,13 +96,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not validate blob data availability")
|
||||
}
|
||||
// The rest of block processing takes a lock on forkchoice.
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save post state info")
|
||||
}
|
||||
|
||||
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -171,7 +181,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error {
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -179,20 +189,21 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Apply state transition on the incoming newly received block batches, one by one.
|
||||
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
|
||||
if err := s.onBlockBatch(ctx, blocks); err != nil {
|
||||
err := errors.Wrap(err, "could not process block in batch")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
lastBR := blkRoots[len(blkRoots)-1]
|
||||
lastBR := blocks[len(blocks)-1].Root()
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(lastBR)
|
||||
if err != nil {
|
||||
lastSlot := blocks[len(blocks)-1].Block().Slot()
|
||||
log.WithError(err).Errorf("Could not check if block is optimistic, Root: %#x, Slot: %d", lastBR, lastSlot)
|
||||
optimistic = true
|
||||
}
|
||||
for i, b := range blocks {
|
||||
|
||||
for _, b := range blocks {
|
||||
blockCopy, err := b.Copy()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -202,7 +213,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block().Slot(),
|
||||
BlockRoot: blkRoots[i],
|
||||
BlockRoot: b.Root(),
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
Optimistic: optimistic,
|
||||
@@ -237,6 +248,11 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
return s.hasBlockInInitSyncOrDB(ctx, root)
|
||||
}
|
||||
|
||||
// RecentBlockSlot returns the block slot from the fork choice store.
|
||||
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
|
||||
return s.cfg.ForkChoiceStore.Slot(root)
|
||||
}
|
||||
|
||||
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -228,13 +227,11 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
|
||||
err := s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(tt.args.block)
|
||||
require.NoError(t, err)
|
||||
blks := []interfaces.ReadOnlySignedBeaconBlock{wsb}
|
||||
roots := [][32]byte{root}
|
||||
err = s.ReceiveBlockBatch(ctx, blks, roots)
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb})
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
@@ -60,6 +61,7 @@ type Service struct {
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
blobNotifier *blobNotifier
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -88,16 +90,34 @@ type config struct {
|
||||
|
||||
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
|
||||
|
||||
type blobNotifierChan struct {
|
||||
indices map[uint64]struct{}
|
||||
channel chan struct{}
|
||||
}
|
||||
|
||||
type blobNotifier struct {
|
||||
sync.RWMutex
|
||||
chanForRoot map[[32]byte]*blobNotifierChan
|
||||
}
|
||||
|
||||
// NewService instantiates a new block service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
err := kzg.Start()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not initialize go-kzg context")
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifier{
|
||||
chanForRoot: make(map[[32]byte]*blobNotifierChan),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
|
||||
blobNotifier: bn,
|
||||
cfg: &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
@@ -108,7 +128,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
if srv.clockSetter == nil {
|
||||
return nil, ErrMissingClockSetter
|
||||
}
|
||||
var err error
|
||||
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -52,6 +52,11 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -69,6 +70,7 @@ type ChainService struct {
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
@@ -203,7 +205,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock, _ [][32]byte) error {
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error {
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
@@ -389,6 +391,11 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
|
||||
return s.InitSyncBlockRoots[rt]
|
||||
}
|
||||
|
||||
// RecentBlockSlot mocks the same method in the chain service.
|
||||
func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
|
||||
return s.BlockSlot, nil
|
||||
}
|
||||
|
||||
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
|
||||
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
@@ -597,3 +604,6 @@ func (s *ChainService) FinalizedBlockHash() [32]byte {
|
||||
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// SendNewBlobEvent mocks the same method in the chain service
|
||||
func (s *ChainService) SendNewBlobEvent(_ [32]byte, _ uint64) {}
|
||||
|
||||
@@ -15,10 +15,12 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
|
||||
@@ -2,6 +2,7 @@ package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
@@ -10,10 +11,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -24,7 +27,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
@@ -87,7 +90,7 @@ func (s *Service) Stop() error {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock submits a blinded block to the builder relay network.
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
@@ -95,10 +98,13 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
|
||||
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
if s.c == nil {
|
||||
return nil, ErrNoBuilder
|
||||
return nil, nil, ErrNoBuilder
|
||||
}
|
||||
if uint64(len(blobs)) > fieldparams.MaxBlobsPerBlock {
|
||||
return nil, nil, fmt.Errorf("blob count %d beyond max limit of %d", len(blobs), fieldparams.MaxBlobsPerBlock)
|
||||
}
|
||||
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
return s.c.SubmitBlindedBlock(ctx, b, blobs)
|
||||
}
|
||||
|
||||
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
|
||||
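For orientation, a hedged sketch of how a caller might drive the widened SubmitBlindedBlock signature; submitWithBlobs, svc, blk, and blobs are hypothetical names, not part of this change:

// submitWithBlobs is an illustrative wrapper: it forwards the signed blinded
// block plus its blinded blob sidecars and returns the unblinded payload and
// the relay's blobs bundle.
func submitWithBlobs(ctx context.Context, svc *builder.Service, blk interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
	payload, bundle, err := svc.SubmitBlindedBlock(ctx, blk, blobs)
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not submit blinded block to relay")
	}
	// For a Deneb block carrying sidecars, a bundle is expected alongside the payload.
	if len(blobs) > 0 && bundle == nil {
		return nil, nil, errors.New("relay did not return a blobs bundle for a Deneb block")
	}
	return payload, bundle, nil
}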
@@ -62,7 +62,7 @@ func Test_BuilderMethodsWithouClient(t *testing.T) {
|
||||
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
|
||||
_, err = s.SubmitBlindedBlock(context.Background(), nil)
|
||||
_, _, err = s.SubmitBlindedBlock(context.Background(), nil, nil)
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
|
||||
err = s.RegisterValidator(context.Background(), nil)
|
||||
|
||||
@@ -16,6 +16,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
@@ -26,9 +27,12 @@ type MockBuilderService struct {
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
ErrSubmitBlindedBlock error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
@@ -41,23 +45,36 @@ func (s *MockBuilderService) Configured() bool {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
|
||||
if s.Payload != nil {
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
switch b.Version() {
|
||||
case version.Bellatrix:
|
||||
w, err := blocks.WrappedExecutionPayload(s.Payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not wrap payload")
|
||||
return nil, nil, errors.Wrap(err, "could not wrap payload")
|
||||
}
|
||||
return w, s.ErrSubmitBlindedBlock
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Capella:
|
||||
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
|
||||
}
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Deneb:
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
|
||||
}
|
||||
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
|
||||
default:
|
||||
return nil, nil, errors.New("unknown block version for mocking")
|
||||
}
|
||||
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not wrap capella payload")
|
||||
}
|
||||
return w, s.ErrSubmitBlindedBlock
|
||||
}
|
||||
|
||||
// GetHeader for mocking.
|
||||
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
|
||||
return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
|
||||
}
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
|
||||
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
|
||||
}
|
||||
|
||||
@@ -289,13 +289,13 @@ func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *et
|
||||
sourceFlagIndex := cfg.TimelySourceFlagIndex
|
||||
targetFlagIndex := cfg.TimelyTargetFlagIndex
|
||||
headFlagIndex := cfg.TimelyHeadFlagIndex
|
||||
slotsPerEpoch := cfg.SlotsPerEpoch
|
||||
sqtRootSlots := cfg.SqrRootSlotsPerEpoch
|
||||
if matchedSrc && delay <= sqtRootSlots {
|
||||
participatedFlags[sourceFlagIndex] = true
|
||||
}
|
||||
matchedSrcTgt := matchedSrc && matchedTgt
|
||||
if matchedSrcTgt && delay <= slotsPerEpoch {
|
||||
// Before Deneb no attestation should pass validation without having delay <= slotsPerEpoch.
|
||||
if matchedSrcTgt {
|
||||
participatedFlags[targetFlagIndex] = true
|
||||
}
|
||||
matchedSrcTgtHead := matchedHead && matchedSrcTgt
|
||||
|
||||
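The edit above drops the delay <= SLOTS_PER_EPOCH condition for the timely-target flag outright rather than gating it on the fork: before Deneb such attestations are already rejected by block validation before this function runs, and from Deneb (EIP-7045) they are genuinely allowed. A rough restatement, with matchedSrcTgt and targetFlagIndex as in the code above:

// Old: participatedFlags[targetFlagIndex] set only if matchedSrcTgt && delay <= slotsPerEpoch.
// New: the delay bound is gone; matching source and target alone sets the flag.
if matchedSrcTgt {
	participatedFlags[targetFlagIndex] = true
}

Both "with delay" test cases below (pre-Deneb and Deneb states) therefore expect the target flag to be set even with an inclusion delay of SlotsPerEpoch+1.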
@@ -630,6 +630,9 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
|
||||
targetFlagIndex := cfg.TimelyTargetFlagIndex
|
||||
headFlagIndex := cfg.TimelyHeadFlagIndex
|
||||
|
||||
denebState, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, denebState.SetSlot(1))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputState state.BeaconState
|
||||
@@ -678,6 +681,34 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
|
||||
targetFlagIndex: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "participated source and target with delay",
|
||||
inputState: func() state.BeaconState {
|
||||
return beaconState
|
||||
}(),
|
||||
inputData: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
|
||||
Target: ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
|
||||
},
|
||||
inputDelay: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
participationIndices: map[uint8]bool{
|
||||
targetFlagIndex: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "participated source and target with delay in deneb",
|
||||
inputState: func() state.BeaconState {
|
||||
return denebState
|
||||
}(),
|
||||
inputData: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
|
||||
Target: ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
|
||||
},
|
||||
inputDelay: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
participationIndices: map[uint8]bool{
|
||||
targetFlagIndex: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "participated source and target and head",
|
||||
inputState: func() state.BeaconState {
|
||||
@@ -696,7 +727,6 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -81,7 +82,6 @@ func VerifyAttestationNoVerifySignature(
|
||||
|
||||
s := att.Data.Slot
|
||||
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
|
||||
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
|
||||
if !minInclusionCheck {
|
||||
return fmt.Errorf(
|
||||
"attestation slot %d + inclusion delay %d > state slot %d",
|
||||
@@ -90,13 +90,17 @@ func VerifyAttestationNoVerifySignature(
|
||||
beaconState.Slot(),
|
||||
)
|
||||
}
|
||||
if !epochInclusionCheck {
|
||||
return fmt.Errorf(
|
||||
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
|
||||
beaconState.Slot(),
|
||||
s,
|
||||
params.BeaconConfig().SlotsPerEpoch,
|
||||
)
|
||||
|
||||
if beaconState.Version() < version.Deneb {
|
||||
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
|
||||
if !epochInclusionCheck {
|
||||
return fmt.Errorf(
|
||||
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
|
||||
beaconState.Slot(),
|
||||
s,
|
||||
params.BeaconConfig().SlotsPerEpoch,
|
||||
)
|
||||
}
|
||||
}
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.Data.Target.Epoch)
|
||||
if err != nil {
|
||||
|
||||
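As a worked example of the relaxed block-inclusion window: with SLOTS_PER_EPOCH = 32 and MIN_ATTESTATION_INCLUSION_DELAY = 1, an attestation from slot 0 can be included at state slot 33 only on a Deneb-or-later state; a pre-Deneb state still rejects it with the SLOTS_PER_EPOCH error. An illustrative restatement of the two bounds (not the production code):

// inclusionOK mirrors the checks above: the minimum-delay lower bound always
// applies, while the one-epoch upper bound applies only before Deneb (EIP-7045).
func inclusionOK(stateVersion int, attSlot, stateSlot, slotsPerEpoch, minDelay primitives.Slot) bool {
	if attSlot+minDelay > stateSlot {
		return false // attestation is too new to be included yet
	}
	if stateVersion < version.Deneb && stateSlot > attSlot+slotsPerEpoch {
		return false // older than one epoch: rejected only before Deneb
	}
	return true
}

This is what the two new sub-tests below assert: the pre-Deneb state fails with "state slot 33 > attestation slot 0 + SLOTS_PER_EPOCH 32", while the Deneb state verifies cleanly.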
@@ -127,6 +127,44 @@ func TestProcessAttestationsNoVerify_OK(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestationsNoVerify_OlderThanSlotsPerEpoch(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(1, true)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("attestation older than slots per epoch", func(t *testing.T) {
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 100)
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + 1)
|
||||
require.NoError(t, err)
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
copy(ckp.Root, "hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{}))
|
||||
|
||||
require.ErrorContains(t, "state slot 33 > attestation slot 0 + SLOTS_PER_EPOCH 32", blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att))
|
||||
})
|
||||
|
||||
t.Run("attestation older than slots per epoch in deneb", func(t *testing.T) {
|
||||
beaconState, _ := util.DeterministicGenesisStateDeneb(t, 100)
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + 1)
|
||||
require.NoError(t, err)
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
copy(ckp.Root, "hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
|
||||
require.NoError(t, blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att))
|
||||
})
|
||||
}
|
||||
|
||||
func TestVerifyAttestationNoVerifySignature_OK(t *testing.T) {
|
||||
// Attestation with an empty signature
|
||||
|
||||
|
||||
@@ -413,7 +413,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyExit_10000(_ *testing.T) {
|
||||
func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ve := ðpb.SignedVoluntaryExit{}
|
||||
rawVal := ðpb.Validator{}
|
||||
@@ -425,9 +425,18 @@ func TestFuzzVerifyExit_10000(_ *testing.T) {
|
||||
fuzzer.Fuzz(rawVal)
|
||||
fuzzer.Fuzz(fork)
|
||||
fuzzer.Fuzz(&slot)
|
||||
|
||||
state := ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Fork: fork,
|
||||
GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
val, err := state_native.NewValidator(ðpb.Validator{})
|
||||
_ = err
|
||||
err = VerifyExitAndSignature(val, slot, fork, ve, params.BeaconConfig().ZeroHash[:])
|
||||
err = VerifyExitAndSignature(val, s, ve)
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
@@ -60,7 +61,7 @@ func ProcessVoluntaryExits(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
|
||||
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
|
||||
}
|
||||
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
|
||||
@@ -101,15 +102,27 @@ func ProcessVoluntaryExits(
|
||||
// initiate_validator_exit(state, voluntary_exit.validator_index)
|
||||
func VerifyExitAndSignature(
|
||||
validator state.ReadOnlyValidator,
|
||||
currentSlot primitives.Slot,
|
||||
fork *ethpb.Fork,
|
||||
state state.ReadOnlyBeaconState,
|
||||
signed *ethpb.SignedVoluntaryExit,
|
||||
genesisRoot []byte,
|
||||
) error {
|
||||
if signed == nil || signed.Exit == nil {
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
|
||||
currentSlot := state.Slot()
|
||||
fork := state.Fork()
|
||||
genesisRoot := state.GenesisValidatorsRoot()
|
||||
|
||||
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
|
||||
// This allows for signed validator exits to be valid forever.
|
||||
if state.Version() >= version.Deneb {
|
||||
fork = ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
exit := signed.Exit
|
||||
if err := verifyExitConditions(validator, currentSlot, exit); err != nil {
|
||||
return err
|
||||
|
||||
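A hedged usage fragment for the reworked helper: callers now pass the read-only state instead of the slot, fork, and genesis root individually, and for states at Deneb or later the exit domain is computed from the Capella fork version (EIP-7044), so exits signed under Capella rules never expire. Here val, st, and signedExit are assumed inputs:

// val is a state.ReadOnlyValidator, st a state.ReadOnlyBeaconState, and
// signedExit an *ethpb.SignedVoluntaryExit supplied by the caller.
if err := blocks.VerifyExitAndSignature(val, st, signedExit); err != nil {
	return errors.Wrapf(err, "voluntary exit for validator %d failed verification", signedExit.Exit.ValidatorIndex)
}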
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -133,38 +134,38 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature(t *testing.T) {
|
||||
type args struct {
|
||||
currentSlot primitives.Slot
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error)
|
||||
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error)
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "Empty Exit",
|
||||
args: args{
|
||||
currentSlot: 0,
|
||||
},
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
}
|
||||
genesisRoot := [32]byte{'a'}
|
||||
return ðpb.Validator{}, ðpb.SignedVoluntaryExit{}, fork, genesisRoot[:], nil
|
||||
|
||||
st := ðpb.BeaconState{
|
||||
Slot: 0,
|
||||
Fork: fork,
|
||||
GenesisValidatorsRoot: genesisRoot[:],
|
||||
}
|
||||
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(st)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return ðpb.Validator{}, ðpb.SignedVoluntaryExit{}, s, nil
|
||||
},
|
||||
wantErr: "nil exit",
|
||||
},
|
||||
{
|
||||
name: "Happy Path",
|
||||
args: args{
|
||||
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
|
||||
},
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
@@ -186,15 +187,18 @@ func TestVerifyExitAndSignature(t *testing.T) {
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
signedExit.Signature = sig.Marshal()
|
||||
return validator, signedExit, fork, bs.GenesisValidatorsRoot(), nil
|
||||
if err := bs.SetFork(fork); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := bs.SetSlot((params.BeaconConfig().SlotsPerEpoch * 2) + 1); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return validator, signedExit, bs, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bad signature",
|
||||
args: args{
|
||||
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
|
||||
},
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
@@ -215,27 +219,72 @@ func TestVerifyExitAndSignature(t *testing.T) {
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
signedExit.Signature = sig.Marshal()
|
||||
genesisRoot := [32]byte{'a'}
|
||||
if err := bs.SetFork(fork); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := bs.SetSlot((params.BeaconConfig().SlotsPerEpoch * 2) + 1); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
// use wrong genesis root and don't update validator
|
||||
return validator, signedExit, fork, genesisRoot[:], nil
|
||||
genesisRoot := [32]byte{'a'}
|
||||
if err := bs.SetGenesisValidatorsRoot(genesisRoot[:]); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return validator, signedExit, bs, nil
|
||||
},
|
||||
wantErr: "signature did not verify",
|
||||
},
|
||||
{
|
||||
name: "EIP-7044: deneb exits should verify with capella fork information",
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().DenebForkVersion,
|
||||
Epoch: primitives.Epoch(2),
|
||||
}
|
||||
signedExit := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 2,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
}
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
bs, err := state_native.InitializeFromProtoUnsafeDeneb(ðpb.BeaconStateDeneb{
|
||||
GenesisValidatorsRoot: bs.GenesisValidatorsRoot(),
|
||||
Fork: fork,
|
||||
Slot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
|
||||
Validators: bs.Validators(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
validator := bs.Validators()[0]
|
||||
validator.ActivationEpoch = 1
|
||||
err = bs.UpdateValidatorAtIndex(0, validator)
|
||||
require.NoError(t, err)
|
||||
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
signedExit.Signature = sig.Marshal()
|
||||
|
||||
return validator, signedExit, bs, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := params.BeaconConfig().ShardCommitteePeriod
|
||||
params.BeaconConfig().ShardCommitteePeriod = 0
|
||||
validator, signedExit, fork, genesisRoot, err := tt.setup()
|
||||
validator, signedExit, st, err := tt.setup()
|
||||
require.NoError(t, err)
|
||||
rvalidator, err := state_native.NewValidator(validator)
|
||||
require.NoError(t, err)
|
||||
err = blocks.VerifyExitAndSignature(
|
||||
rvalidator,
|
||||
tt.args.currentSlot,
|
||||
fork,
|
||||
st,
|
||||
signedExit,
|
||||
genesisRoot,
|
||||
)
|
||||
if tt.wantErr == "" {
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -145,6 +145,40 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateDeneb:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{
|
||||
Block: ðpb.BeaconBlockDeneb{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyDeneb{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
},
|
||||
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
default:
|
||||
return nil, ErrUnrecognizedState
|
||||
}
|
||||
|
||||
beacon-chain/core/deneb/BUILD.bazel (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["upgrade.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["upgrade_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/core/deneb/upgrade.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
package deneb
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// UpgradeToDeneb upgrades the given pre-Deneb beacon state and returns its Deneb-version counterpart.
|
||||
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
|
||||
epoch := time.CurrentEpoch(state)
|
||||
|
||||
currentSyncCommittee, err := state.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSyncCommittee, err := state.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevEpochParticipation, err := state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentEpochParticipation, err := state.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inactivityScores, err := state.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHeader, err := state.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txRoot, err := payloadHeader.TransactionsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wdRoot, err := payloadHeader.WithdrawalsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wi, err := state.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vi, err := state.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
summaries, err := state.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateDeneb{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: state.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().DenebForkVersion,
|
||||
Epoch: epoch,
|
||||
},
|
||||
LatestBlockHeader: state.LatestBlockHeader(),
|
||||
BlockRoots: state.BlockRoots(),
|
||||
StateRoots: state.StateRoots(),
|
||||
HistoricalRoots: [][]byte{},
|
||||
Eth1Data: state.Eth1Data(),
|
||||
Eth1DataVotes: state.Eth1DataVotes(),
|
||||
Eth1DepositIndex: state.Eth1DepositIndex(),
|
||||
Validators: state.Validators(),
|
||||
Balances: state.Balances(),
|
||||
RandaoMixes: state.RandaoMixes(),
|
||||
Slashings: state.Slashings(),
|
||||
PreviousEpochParticipation: prevEpochParticipation,
|
||||
CurrentEpochParticipation: currentEpochParticipation,
|
||||
JustificationBits: state.JustificationBits(),
|
||||
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
|
||||
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
|
||||
FinalizedCheckpoint: state.FinalizedCheckpoint(),
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
ExcessBlobGas: 0,
|
||||
BlobGasUsed: 0,
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
HistoricalSummaries: summaries,
|
||||
}
|
||||
|
||||
return state_native.InitializeFromProtoUnsafeDeneb(s)
|
||||
}
|
||||
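A short sketch of where this helper is meant to be invoked, mirroring the UpgradeState change later in this diff and assuming the CanUpgradeToDeneb predicate from beacon-chain/core/time:

// Upgrade the state in place once slot processing reaches the first slot of
// DENEB_FORK_EPOCH.
if time.CanUpgradeToDeneb(st.Slot()) {
	var err error
	st, err = deneb.UpgradeToDeneb(st)
	if err != nil {
		return nil, errors.Wrap(err, "could not upgrade state to deneb")
	}
}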
beacon-chain/core/deneb/upgrade_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
package deneb_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
func TestUpgradeToDeneb(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
preForkState := st.Copy()
|
||||
mSt, err := deneb.UpgradeToDeneb(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), mSt.Slot())
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
|
||||
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
|
||||
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
|
||||
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
|
||||
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
|
||||
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
|
||||
numValidators := mSt.NumValidators()
|
||||
p, err := mSt.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
p, err = mSt.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
s, err := mSt.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
|
||||
|
||||
f := mSt.Fork()
|
||||
require.DeepSSZEqual(t, ðpb.Fork{
|
||||
PreviousVersion: st.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().DenebForkVersion,
|
||||
Epoch: time.CurrentEpoch(st),
|
||||
}, f)
|
||||
csc, err := mSt.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err := preForkState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, csc)
|
||||
nsc, err := mSt.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err = preForkState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, nsc)
|
||||
|
||||
header, err := mSt.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
|
||||
require.Equal(t, true, ok)
|
||||
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
txRoot, err := prevHeader.TransactionsRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
}
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -166,14 +167,33 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
|
||||
lowerBoundsSlot,
|
||||
currentSlot,
|
||||
)
|
||||
if attTime.Before(lowerBounds) {
|
||||
attReceivedTooLateCount.Inc()
|
||||
return errors.Join(ErrTooLate, attError)
|
||||
}
|
||||
if attTime.After(upperBounds) {
|
||||
attReceivedTooEarlyCount.Inc()
|
||||
return attError
|
||||
}
|
||||
|
||||
attEpoch := slots.ToEpoch(attSlot)
|
||||
if attEpoch < params.BeaconConfig().DenebForkEpoch {
|
||||
if attTime.Before(lowerBounds) {
|
||||
attReceivedTooLateCount.Inc()
|
||||
return errors.Join(ErrTooLate, attError)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
|
||||
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
prevEpoch, err := currentEpoch.SafeSub(1)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
|
||||
prevEpoch = 0
|
||||
}
|
||||
attSlotEpoch := slots.ToEpoch(attSlot)
|
||||
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
|
||||
return fmt.Errorf("attestation slot %d not within current epoch %d or previous epoch %d", attSlot, currentEpoch, prevEpoch)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
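To make the new gossip window concrete, assume SLOTS_PER_EPOCH = 32 and ATTESTATION_PROPAGATION_SLOT_RANGE = 32. At current slot 300 (epoch 9), a pre-Deneb node rejects anything older than slot 268, while a post-Deneb node (EIP-7045) accepts any attestation whose epoch is 9 or 8, i.e. slots 256 through 300. The epoch check reduces roughly to:

// Illustrative: after Deneb only the attestation's epoch matters.
attEpoch := slots.ToEpoch(attSlot)     // e.g. slot 260 -> epoch 8: accepted
curEpoch := slots.ToEpoch(currentSlot) // e.g. slot 300 -> epoch 9
if attEpoch != curEpoch && attEpoch+1 != curEpoch {
	return fmt.Errorf("attestation epoch %d is neither the current epoch %d nor the previous one", attEpoch, curEpoch)
}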
@@ -85,6 +85,11 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_ValidateAttestationTime(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
|
||||
t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
|
||||
}
|
||||
@@ -155,6 +160,39 @@ func Test_ValidateAttestationTime(t *testing.T) {
|
||||
).Add(200 * time.Millisecond),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "attestation.slot < current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
|
||||
args: args{
|
||||
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange - 1,
|
||||
genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
|
||||
args: args{
|
||||
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
|
||||
genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE, received 200ms late in deneb",
|
||||
args: args{
|
||||
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
|
||||
genesisTime: prysmTime.Now().Add(
|
||||
-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
|
||||
).Add(200 * time.Millisecond),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "attestation.slot != current epoch or previous epoch in deneb",
|
||||
args: args{
|
||||
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
|
||||
genesisTime: prysmTime.Now().Add(
|
||||
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
|
||||
).Add(200 * time.Millisecond),
|
||||
},
|
||||
wantedErr: "attestation slot 268 not within current epoch 15 or previous epoch 14",
|
||||
},
|
||||
{
|
||||
name: "attestation.slot is well beyond current slot",
|
||||
args: args{
|
||||
|
||||
@@ -16,6 +16,7 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
// ForkVersionByteLength length of fork version byte array.
|
||||
@@ -56,7 +57,18 @@ const (
|
||||
|
||||
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
|
||||
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
|
||||
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
|
||||
fork := st.Fork()
|
||||
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
|
||||
// This allows for signed validator exits to be valid forever.
|
||||
if st.Version() >= version.Deneb && domain == params.BeaconConfig().DomainVoluntaryExit {
|
||||
fork = ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
d, err := Domain(fork, epoch, domain, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
|
||||
return epochStart && capellaEpoch
|
||||
}
|
||||
|
||||
// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
|
||||
func CanUpgradeToDeneb(slot primitives.Slot) bool {
|
||||
epochStart := slots.IsEpochStart(slot)
|
||||
DenebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
|
||||
return epochStart && DenebEpoch
|
||||
}
|
||||
|
||||
// CanProcessEpoch checks the eligibility to process epoch.
|
||||
// The epoch can be processed at the end of the last slot of every epoch.
|
||||
//
|
||||
|
||||
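For example, with the test override below (DENEB_FORK_EPOCH = 5) and mainnet's SLOTS_PER_EPOCH = 32, the predicate is true only for slot 160, the first slot of epoch 5:

// Illustrative values only; DENEB_FORK_EPOCH = 5 is a test override.
forkSlot := primitives.Slot(5) * 32             // 160, first slot of epoch 5
fmt.Println(time.CanUpgradeToDeneb(forkSlot))   // true
fmt.Println(time.CanUpgradeToDeneb(forkSlot+1)) // false: not an epoch start
fmt.Println(time.CanUpgradeToDeneb(32))         // false: epoch 1 != DENEB_FORK_EPOCH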
@@ -298,3 +298,38 @@ func TestCanUpgradeToCapella(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanUpgradeToDeneb(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.DenebForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bc)
|
||||
tests := []struct {
|
||||
name string
|
||||
slot primitives.Slot
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "not epoch start",
|
||||
slot: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "not deneb epoch",
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "deneb epoch",
|
||||
slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := time.CanUpgradeToDeneb(tt.slot); got != tt.want {
|
||||
t.Errorf("CanUpgradeToDeneb() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/capella:go_default_library",
|
||||
"//beacon-chain/core/deneb:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/capella"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
|
||||
e "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/execution"
|
||||
@@ -269,28 +270,10 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
|
||||
return nil, errors.Wrap(err, "failed to increment state slot")
|
||||
}
|
||||
|
||||
if time.CanUpgradeToAltair(state.Slot()) {
|
||||
state, err = altair.UpgradeToAltair(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToBellatrix(state.Slot()) {
|
||||
state, err = execution.UpgradeToBellatrix(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToCapella(state.Slot()) {
|
||||
state, err = capella.UpgradeToCapella(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
state, err = UpgradeState(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "failed to upgrade state")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -301,6 +284,45 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// UpgradeState upgrades the state to the next version if possible.
|
||||
func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.UpgradeState")
|
||||
defer span.End()
|
||||
var err error
|
||||
if time.CanUpgradeToAltair(state.Slot()) {
|
||||
state, err = altair.UpgradeToAltair(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToBellatrix(state.Slot()) {
|
||||
state, err = execution.UpgradeToBellatrix(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToCapella(state.Slot()) {
|
||||
state, err = capella.UpgradeToCapella(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToDeneb(state.Slot()) {
|
||||
state, err = deneb.UpgradeToDeneb(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// VerifyOperationLengths verifies that block operation lengths are valid.
|
||||
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
|
||||
v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
@@ -256,7 +257,7 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Altair, version.Bellatrix, version.Capella:
|
||||
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
|
||||
state, err = altairOperations(ctx, state, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -324,6 +325,10 @@ func ProcessBlockForStateRoot(
|
||||
}
|
||||
}
|
||||
|
||||
if err := VerifyBlobCommitmentCount(blk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
|
||||
if err != nil {
|
||||
@@ -359,6 +364,20 @@ func ProcessBlockForStateRoot(
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
kzgs, err := blk.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(kzgs) > field_params.MaxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This calls altair block operations.
|
||||
func altairOperations(
|
||||
ctx context.Context,
|
||||
|
||||
@@ -2,11 +2,13 @@ package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -210,3 +212,15 @@ func TestProcessBlockDifferentVersion(t *testing.T) {
|
||||
_, _, err = transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, "state and block are different version. 0 != 1", err)
|
||||
}
|
||||
|
||||
func TestVerifyBlobCommitmentCount(t *testing.T) {
|
||||
b := ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}
|
||||
rb, err := blocks.NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, transition.VerifyBlobCommitmentCount(rb))
|
||||
|
||||
b = ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, field_params.MaxBlobsPerBlock+1)}}
|
||||
rb, err = blocks.NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", field_params.MaxBlobsPerBlock+1), transition.VerifyBlobCommitmentCount(rb))
|
||||
}
|
||||
|
||||
@@ -629,6 +629,20 @@ func TestProcessSlots_ThroughBellatrixEpoch(t *testing.T) {
|
||||
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
|
||||
}
|
||||
|
||||
func TestProcessSlots_ThroughDenebEpoch(t *testing.T) {
|
||||
transition.SkipSlotCache.Disable()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
conf := params.BeaconConfig()
|
||||
conf.DenebForkEpoch = 5
|
||||
params.OverrideBeaconConfig(conf)
|
||||
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version.Deneb, st.Version())
|
||||
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
|
||||
}
|
||||
|
||||
func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1)
|
||||
r := []byte{'a'}
|
||||
|
||||
@@ -54,6 +54,11 @@ type ReadOnlyDatabase interface {
|
||||
// Fee recipients operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
|
||||
// Blob operations.
|
||||
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||
|
||||
// origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
@@ -89,6 +94,10 @@ type NoHeadAccessDatabase interface {
|
||||
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
|
||||
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
||||
|
||||
// Blob operations.
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
}
|
||||
|
||||
|
||||
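A hedged sketch of the new blob persistence round trip; db is any value satisfying the interfaces above and scs holds one block's complete set of sidecars (both are assumed):

// Persist all sidecars for one block, then read a subset back by block root.
if err := db.SaveBlobSidecar(ctx, scs); err != nil {
	return err
}
root := bytesutil.ToBytes32(scs[0].BlockRoot)
// Passing indices filters the result; omitting them returns every sidecar.
got, err := db.BlobSidecarsByRoot(ctx, root, 0, 1)
if err != nil {
	return err
}
log.Infof("retrieved %d of %d sidecars for block %#x", len(got), len(scs), root)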
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"archived_point.go",
|
||||
"backup.go",
|
||||
"blob.go",
|
||||
"blocks.go",
|
||||
"checkpoint.go",
|
||||
"deposit_contract.go",
|
||||
@@ -38,6 +39,7 @@ go_library(
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
@@ -74,6 +76,7 @@ go_test(
|
||||
srcs = [
|
||||
"archived_point_test.go",
|
||||
"backup_test.go",
|
||||
"blob_test.go",
|
||||
"blocks_test.go",
|
||||
"checkpoint_test.go",
|
||||
"deposit_contract_test.go",
|
||||
@@ -110,6 +113,7 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/testing:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/assertions:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
|
||||
beacon-chain/db/kv/blob.go (new file, 264 lines)
@@ -0,0 +1,264 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// SaveBlobSidecar saves the blob sidecars for a given block in the blobs bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots) where maxSlots = MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS*SLOTS_PER_EPOCH
|
||||
//
|
||||
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
//
|
||||
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
|
||||
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
|
||||
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
|
||||
defer span.End()
|
||||
|
||||
sortSideCars(scs)
|
||||
if err := s.verifySideCars(scs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
encodedBlobSidecar, err := encode(ctx, ðpb.BlobSidecars{Sidecars: scs})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bkt := tx.Bucket(blobsBucket)
|
||||
c := bkt.Cursor()
|
||||
newKey := blobSidecarKey(scs[0])
|
||||
rotatingBufferPrefix := newKey[0:8]
|
||||
var replacingKey []byte
|
||||
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
|
||||
if len(k) != 0 {
|
||||
replacingKey = k
|
||||
break
|
||||
}
|
||||
}
|
||||
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
|
||||
// store the blob by key and exit early.
|
||||
if len(replacingKey) != 0 {
|
||||
slotBytes := replacingKey[:8]
|
||||
oldSlot := bytesutil.BytesToSlotBigEndian(slotBytes)
|
||||
oldEpoch := slots.ToEpoch(oldSlot)
|
||||
// The blob we are replacing is too old, so we delete it.
|
||||
if slots.ToEpoch(scs[0].Slot) >= oldEpoch.Add(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)) {
|
||||
if err := bkt.Delete(replacingKey); err != nil {
|
||||
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
|
||||
}
|
||||
} else {
|
||||
// Otherwise, we need to merge the new blob with the old blob.
|
||||
enc := bkt.Get(replacingKey)
|
||||
sc := ðpb.BlobSidecars{}
|
||||
if err := decode(ctx, enc, sc); err != nil {
|
||||
return err
|
||||
}
|
||||
sc.Sidecars = append(sc.Sidecars, scs...)
|
||||
sortSideCars(sc.Sidecars)
|
||||
encodedBlobSidecar, err = encode(ctx, ðpb.BlobSidecars{Sidecars: sc.Sidecars})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return bkt.Put(newKey, encodedBlobSidecar)
|
||||
})
|
||||
}
|
||||
|
||||
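To make the replace-or-merge decision above concrete, assume SLOTS_PER_EPOCH = 32 and MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096, so the rotating buffer spans 131072 slots. A new sidecar at slot 131080 lands on the same buffer position as an old one at slot 8 (131080 % 131072 = 8); since epoch(131080) = 4096 >= epoch(8) + 4096, the old entry has aged out and is deleted rather than merged:

// Illustrative restatement of the age check used above.
newSlot, oldSlot := types.Slot(131080), types.Slot(8) // same rotating-buffer position
retention := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
overwrite := slots.ToEpoch(newSlot) >= slots.ToEpoch(oldSlot).Add(uint64(retention))
// overwrite is true here: the previous occupant is outside the retention
// window and is dropped; otherwise the new sidecars would be merged into it.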
// verifySideCars ensures that all sidecars share the same slot, parent root, block root, and proposer index, and that no more than MAX_BLOBS_PER_BLOCK sidecars are provided.
|
||||
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
|
||||
if len(scs) == 0 {
|
||||
return errors.New("nil or empty blob sidecars")
|
||||
}
|
||||
if uint64(len(scs)) > fieldparams.MaxBlobsPerBlock {
|
||||
return fmt.Errorf("too many sidecars: %d > %d", len(scs), fieldparams.MaxBlobsPerBlock)
|
||||
}
|
||||
|
||||
sl := scs[0].Slot
|
||||
pr := scs[0].BlockParentRoot
|
||||
r := scs[0].BlockRoot
|
||||
p := scs[0].ProposerIndex
|
||||
|
||||
for _, sc := range scs {
|
||||
if sc.Slot != sl {
|
||||
return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
|
||||
}
|
||||
if !bytes.Equal(sc.BlockParentRoot, pr) {
|
||||
return fmt.Errorf("sidecar parent root mismatch: %x != %x", sc.BlockParentRoot, pr)
|
||||
}
|
||||
if !bytes.Equal(sc.BlockRoot, r) {
|
||||
return fmt.Errorf("sidecar root mismatch: %x != %x", sc.BlockRoot, r)
|
||||
}
|
||||
if sc.ProposerIndex != p {
|
||||
return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sortSideCars sorts the sidecars by their index.
|
||||
func sortSideCars(scs []*ethpb.BlobSidecar) {
|
||||
sort.Slice(scs, func(i, j int) bool {
|
||||
return scs[i].Index < scs[j].Index
|
||||
})
|
||||
}
|
||||
|
||||
// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
|
||||
// If the `indices` argument is omitted, all blobs for the root will be returned.
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
|
||||
defer span.End()
|
||||
|
||||
var enc []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket(blobsBucket).Cursor()
|
||||
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if bytes.HasSuffix(k, root[:]) {
|
||||
enc = v
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if enc == nil {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
sc := ðpb.BlobSidecars{}
|
||||
if err := decode(ctx, enc, sc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return filterForIndices(sc, indices...)
|
||||
}
|
||||
|
||||
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(indices) == 0 {
|
||||
return sc.Sidecars, nil
|
||||
}
|
||||
// This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
|
||||
// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
|
||||
// maps 1:1 with indices in the BlobSidecars storage object.
|
||||
maxIdx := uint64(len(sc.Sidecars)) - 1
|
||||
sidecars := make([]*ethpb.BlobSidecar, len(indices))
|
||||
for i, idx := range indices {
|
||||
if idx > maxIdx {
|
||||
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
|
||||
}
|
||||
sidecars[i] = sc.Sidecars[idx]
|
||||
}
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
|
||||
// If the `indices` argument is omitted, all blobs for the root will be returned.
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
|
||||
defer span.End()
|
||||
|
||||
var enc []byte
|
||||
sk := slotKey(slot)
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket(blobsBucket).Cursor()
|
||||
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
|
||||
for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, _ = c.Next() {
|
||||
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
|
||||
if slotInKey == slot {
|
||||
enc = v
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if enc == nil {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
sc := ðpb.BlobSidecars{}
|
||||
if err := decode(ctx, enc, sc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return filterForIndices(sc, indices...)
|
||||
}
|
||||
|
||||
// DeleteBlobSidecar returns true if the blobs are in the db.
|
||||
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blobsBucket)
|
||||
c := bkt.Cursor()
|
||||
for k, _ := c.First(); k != nil; k, _ = c.Next() {
|
||||
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
|
||||
if err := bkt.Delete(k); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
|
||||
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
|
||||
key := slotKey(blob.Slot)
|
||||
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
|
||||
key = append(key, blob.BlockRoot...)
|
||||
return key
|
||||
}
|
||||
|
||||
func slotKey(slot types.Slot) []byte {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
|
||||
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
|
||||
}
|
||||
|
||||
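The rotating-buffer prefix means that two slots exactly one retention window apart map to the same leading eight bytes, which is what lets SaveBlobSidecar overwrite an expired entry in place instead of growing the bucket. A minimal, self-contained sketch of that collision; the retention window of 4096 epochs * 32 slots and the helper name are illustrative assumptions, not code from this change:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// rotatingPrefix mimics slot_to_rotating_buffer(slot) encoded big-endian.
func rotatingPrefix(slot, retentionSlots uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, slot%retentionSlots)
	return b
}

func main() {
	retentionSlots := uint64(4096 * 32) // illustrative retention window
	a := rotatingPrefix(100, retentionSlots)
	b := rotatingPrefix(100+retentionSlots, retentionSlots)
	// Same prefix, so the newer blob set replaces the expired one under this key.
	fmt.Println(bytes.Equal(a, b)) // true
}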
func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(epochsForBlobSidecarsRequestBucket)
k := []byte("epoch-key")
v := b.Get(k)
if v == nil {
if err := b.Put(k, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
return err
}
return nil
}
e := bytesutil.BytesToUint64BigEndian(v)
if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
}
return nil
}); err != nil {
return err
}
return nil
}
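For reference, a minimal usage sketch of the new store methods; it is illustrative only (the helper name is hypothetical), assumes an already-opened *Store and sidecars that share the same slot, block root, and proposer index, and abbreviates error handling.

// saveAndReadBack shows the intended call pattern: SaveBlobSidecar validates,
// sorts, and merges the sidecars, and BlobSidecarsByRoot returns either all of
// them or only the requested indices.
func saveAndReadBack(ctx context.Context, store *Store, scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
	if err := store.SaveBlobSidecar(ctx, scs); err != nil {
		return nil, err
	}
	// Omitting the trailing index would return every sidecar stored for the root.
	return store.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0)
}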
321
beacon-chain/db/kv/blob_test.go
Normal file
@@ -0,0 +1,321 @@
package kv

import (
"context"
"crypto/rand"
"fmt"
"testing"

"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
"github.com/prysmaticlabs/prysm/v4/testing/require"
bolt "go.etcd.io/bbolt"
)

func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
if len(expect) != len(got) {
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
}
for i := 0; i < len(expect); i++ {
es := expect[i]
gs := got[i]
var e string
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
if e != "" {
return errors.New(e)
}
}
return nil
}

func TestStore_BlobSidecars(t *testing.T) {
ctx := context.Background()

t.Run("empty", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 0)
require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
})
t.Run("empty by root", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("empty by slot", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsBySlot(ctx, 1)
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by root (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]

got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by slot (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]

got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))

require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))

got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("delete works", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("saving blob different times", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)

scs0 := scs[0:2]
scs1 := scs[2:4]
scs2 := scs[4:6]
require.NoError(t, db.SaveBlobSidecar(ctx, scs0))
require.NoError(t, db.SaveBlobSidecar(ctx, scs1))
require.NoError(t, db.SaveBlobSidecar(ctx, scs2))

saved, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs0[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(saved, scs))
})
t.Run("saving a new blob for rotation", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))

newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range newScs {
sc.Slot = sc.Slot + newRetentionSlot
}
require.NoError(t, db.SaveBlobSidecar(ctx, newScs))

_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)

got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(newScs, got))
})
}

func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
blobSidecars := make([]*ethpb.BlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateBlobSidecar(t, i)
}
return blobSidecars
}

func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
kzgCommitment := make([]byte, 48)
_, err = rand.Read(kzgCommitment)
require.NoError(t, err)
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)

return &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
Index: index,
Slot: 100,
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
ProposerIndex: 101,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
}
}

func TestStore_verifySideCars(t *testing.T) {
s := setupDB(t)
tests := []struct {
name string
scs []*ethpb.BlobSidecar
error string
}{
{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), error: "too many sidecars: 7 > 6"},
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := s.verifySideCars(tt.scs)
if tt.error != "" {
require.Equal(t, tt.error, err.Error())
} else {
require.NoError(t, err)
}
})
}
}

func TestStore_sortSidecars(t *testing.T) {
scs := []*ethpb.BlobSidecar{
{Index: 6},
{Index: 4},
{Index: 2},
{Index: 1},
{Index: 3},
{Index: 5},
{},
}
sortSideCars(scs)
for i := 0; i < len(scs)-1; i++ {
require.Equal(t, uint64(i), scs[i].Index)
}
}

func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
s := setupDB(b)
ctx := context.Background()
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
}))

err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
for i := 1; i < 131071; i++ {
r := make([]byte, 32)
_, err := rand.Read(r)
require.NoError(b, err)
scs := []*ethpb.BlobSidecar{
{BlockRoot: r, Slot: primitives.Slot(i)},
}
k := blobSidecarKey(scs[0])
encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
require.NoError(b, err)
require.NoError(b, bkt.Put(k, encodedBlobSidecar))
}
return nil
})
require.NoError(b, err)

require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
}))

b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'})
require.NoError(b, err)
}
}

func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
dbStore := setupDB(t)

require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check

nConfig := params.BeaconNetworkConfig()
nConfig.MinEpochsForBlobsSidecarsRequest = 42069
params.OverrideBeaconNetworkConfig(nConfig)
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
}
@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
}
case hasDenebKey(enc):
rawBlock = &ethpb.SignedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Deneb block")
}
case hasDenebBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -854,6 +864,8 @@ func marshalBlockFull(
return nil, err
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:

@@ -90,6 +90,40 @@ var blockTests = []struct {
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "deneb",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockDeneb()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.BlobKzgCommitments = [][]byte{
bytesutil.PadTo([]byte{0x01}, 48),
bytesutil.PadTo([]byte{0x02}, 48),
bytesutil.PadTo([]byte{0x03}, 48),
bytesutil.PadTo([]byte{0x04}, 48),
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "deneb blind",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBlindedBeaconBlockDeneb()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.BlobKzgCommitments = [][]byte{
bytesutil.PadTo([]byte{0x05}, 48),
bytesutil.PadTo([]byte{0x06}, 48),
bytesutil.PadTo([]byte{0x07}, 48),
bytesutil.PadTo([]byte{0x08}, 48),
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
}

func TestStore_SaveBackfillBlockRoot(t *testing.T) {
@@ -359,6 +393,10 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
if _, err := blk.PbDenebBlock(); err == nil {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
retrievedPb, err := retrievedBlock.Proto()
@@ -582,6 +620,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
bPb, err := b.Proto()
@@ -604,6 +646,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
if _, err := block2.PbDenebBlock(); err == nil {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
wanted2Pb, err := wanted2.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -626,6 +672,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block3.PbDenebBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -666,6 +716,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
bPb, err := b.Proto()
@@ -687,6 +741,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
if _, err := genesisBlock.PbDenebBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -708,6 +766,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
if _, err := genesisBlock.PbDenebBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -808,6 +870,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
if _, err := b1.PbDenebBlock(); err == nil {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
retrieved0Pb, err := retrievedBlocks[0].Proto()
require.NoError(t, err)
wantedPb, err := wanted.Proto()
@@ -828,6 +894,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
if _, err := b2.PbDenebBlock(); err == nil {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
retrieved0Pb, err = retrievedBlocks[0].Proto()
require.NoError(t, err)
wantedPb, err = wanted.Proto()
@@ -842,6 +912,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
if _, err := b3.PbDenebBlock(); err == nil {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
retrieved1Pb, err := retrievedBlocks[1].Proto()
require.NoError(t, err)
wantedPb, err = wanted.Proto()

@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}

func hasDenebKey(enc []byte) bool {
if len(denebKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebKey)], denebKey)
}

func hasDenebBlindKey(enc []byte) bool {
if len(denebBlindKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}
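The two helpers above follow the fork-key convention used throughout this package: encoded objects are prefixed with a short key so the reader can pick the correct SSZ type before unmarshaling. A small illustrative sketch of that convention; the helper names are hypothetical, and the snappy framing applied by the callers in block.go is elided:

// tagWithForkKey prepends the fork key to an SSZ-encoded payload.
func tagWithForkKey(key, ssz []byte) []byte {
	return append(append([]byte{}, key...), ssz...)
}

// detectForkKey reports whether enc carries the given key and, if so,
// returns the remaining bytes, which are the raw SSZ payload.
func detectForkKey(enc, key []byte) ([]byte, bool) {
	if len(key) >= len(enc) || !bytes.Equal(enc[:len(key)], key) {
		return nil, false
	}
	return enc[len(key):], true
}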
@@ -129,6 +129,9 @@ var Buckets = [][]byte{

feeRecipientBucket,
registrationBucket,

blobsBucket,
epochsForBlobSidecarsRequestBucket,
}

// NewKVStore initializes a new boltDB key-value store at the directory
@@ -199,6 +202,11 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
if err := kv.setupBlockStorageType(ctx); err != nil {
return nil, err
}

if err := checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
}

return kv, nil
}

@@ -46,6 +46,7 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
blobsBucket = []byte("blobs")

// Below keys are used to identify objects that are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -55,6 +56,9 @@ var (
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
denebKey = []byte("deneb")
denebBlindKey = []byte("blind-deneb")

// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -70,4 +74,7 @@ var (

// Migrations
migrationsBucket = []byte("migrations")

// Stores how long to keep the blob sidecars for.
epochsForBlobSidecarsRequestBucket = []byte("epochs-for-blob-sidecars-request")
)

@@ -229,34 +229,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
pbState, err := getPhase0PbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState)
if err != nil {
return err
}
pbState.Validators = valEntries
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateAltair:
pbState, err := statenative.ProtobufBeaconStateAltair(rawType)
pbState, err := getAltairPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -272,13 +266,10 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateBellatrix:
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawType)
pbState, err := getBellatrixPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -294,12 +285,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateCapella:
pbState, err := statenative.ProtobufBeaconStateCapella(rawType)
pbState, err := getCapellaPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateDeneb:
pbState, err := getDenebPbState(rawType)
if err != nil {
return err
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
@@ -323,6 +330,61 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}

func getPhase0PbState(rawState interface{}) (*ethpb.BeaconState, error) {
pbState, err := statenative.ProtobufBeaconStatePhase0(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getAltairPbState(rawState interface{}) (*ethpb.BeaconStateAltair, error) {
pbState, err := statenative.ProtobufBeaconStateAltair(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getBellatrixPbState(rawState interface{}) (*ethpb.BeaconStateBellatrix, error) {
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getCapellaPbState(rawState interface{}) (*ethpb.BeaconStateCapella, error) {
pbState, err := statenative.ProtobufBeaconStateCapella(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func getDenebPbState(rawState interface{}) (*ethpb.BeaconStateDeneb, error) {
pbState, err := statenative.ProtobufBeaconStateDeneb(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}

func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
valBkt := tx.Bucket(stateValidatorsBucket)
for hashStr, validatorEntry := range validatorsEntries {
@@ -472,6 +534,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}

switch {
case hasDenebKey(enc):
protoState := &ethpb.BeaconStateDeneb{}
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
case hasCapellaKey(enc):
// Marshal state bytes to capella beacon state.
protoState := &ethpb.BeaconStateCapella{}
@@ -579,6 +654,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}

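Each fork case above repeats the same space-saving pattern: detach the validator list, encode the slimmed state, then restore the list so the in-memory object is unchanged while validator entries are stored separately by hash. A condensed, hedged sketch of that pattern for the Capella type only (illustrative; the real code also writes the encoded state and the validator index keys inside the same bolt transaction):

func encodeStateWithoutValidators(st *ethpb.BeaconStateCapella) ([]byte, error) {
	vals := st.Validators
	st.Validators = make([]*ethpb.Validator, 0)
	defer func() { st.Validators = vals }() // restore the full validator set either way
	raw, err := st.MarshalSSZ()
	if err != nil {
		return nil, err
	}
	return snappy.Encode(nil, append(capellaKey, raw...)), nil
}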
@@ -900,6 +900,100 @@ func TestBellatrixState_CanDelete(t *testing.T) {
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}

func TestDenebState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)

r := [32]byte{'A'}

require.Equal(t, false, db.HasState(context.Background(), r))

st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))

require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))

savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())

savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}

func TestDenebState_CanDelete(t *testing.T) {
db := setupDB(t)

r := [32]byte{'A'}

require.Equal(t, false, db.HasState(context.Background(), r))

st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))

require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))

require.NoError(t, db.DeleteState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}

func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) {
db := setupDB(t)

// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()

r := [32]byte{'A'}

require.Equal(t, false, db.HasState(context.Background(), r))

stateValidators := validators(10)
st, _ := util.DeterministicGenesisStateDeneb(t, 20)
require.NoError(t, st.SetSlot(100))
require.NoError(t, st.SetValidators(stateValidators))

ctx := context.Background()
require.NoError(t, db.SaveState(ctx, st, r))
assert.Equal(t, true, db.HasState(context.Background(), r))

savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching")

// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
data := idxBkt.Get(r[:])
require.NotEqual(t, 0, len(data))
return nil
})
require.NoError(t, err)

// check if all the validator entries are still intact in the validator entry bucket.
err = db.db.Update(func(tx *bolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
// if any of the original validator entries is not present, then fail the test.
for _, val := range stateValidators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
data := valBkt.Get(hash[:])
require.NotNil(t, data)
require.NotEqual(t, 0, len(data))
}
return nil
})
require.NoError(t, err)
}

func BenchmarkState_CheckStateSaveTime_1(b *testing.B)  { checkStateSaveTime(b, 1) }
func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b, 10) }

@@ -48,7 +48,6 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/clientstats:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
@@ -72,6 +71,7 @@ go_library(
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

@@ -23,12 +23,12 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)

var (
@@ -50,14 +50,18 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
NewPayloadMethodV2 = "engine_newPayloadV2"
NewPayloadMethodV3 = "engine_newPayloadV3"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
ForkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
// ForkchoiceUpdatedMethodV3 v3 request string for JSON-RPC.
ForkchoiceUpdatedMethodV3 = "engine_forkchoiceUpdatedV3"
// GetPayloadMethod v1 request string for JSON-RPC.
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
GetPayloadMethodV2 = "engine_getPayloadV2"
GetPayloadMethodV3 = "engine_getPayloadV3"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
@@ -96,11 +100,11 @@ type ExecutionPayloadReconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
@@ -111,7 +115,7 @@ type EngineCaller interface {
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")

// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -143,6 +147,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {
return nil, handleRPCError(err)
}
default:
return nil, errors.New("unknown execution data type")
}
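engine_newPayloadV3 additionally takes the blob versioned hashes and the parent beacon block root, which the execution layer validates against the payload. Callers derive each versioned hash from a blob's KZG commitment per the Deneb spec's kzg_to_versioned_hash (the 0x01 version byte followed by the remaining bytes of the commitment's SHA-256). A hedged sketch of that derivation; the helper name is an assumption, and it presumes crypto/sha256 plus the go-ethereum common package already imported in this file:

// versionedHashesFromCommitments is illustrative, not part of this change:
// versioned_hash = 0x01 ++ sha256(kzg_commitment)[1:].
func versionedHashesFromCommitments(kzgCommitments [][]byte) []common.Hash {
	out := make([]common.Hash, len(kzgCommitments))
	for i, c := range kzgCommitments {
		sum := sha256.Sum256(c)
		sum[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
		out[i] = common.BytesToHash(sum[:])
	}
	return out
}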
@@ -199,6 +212,15 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Deneb:
a, err := attrs.PbV3()
if err != nil {
return nil, nil, err
}
err = s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethodV3, state, a)
if err != nil {
return nil, nil, handleRPCError(err)
}
default:
return nil, nil, fmt.Errorf("unknown payload attribute version: %v", attrs.Version())
}
@@ -220,7 +242,8 @@ func (s *Service) ForkchoiceUpdated(
}

// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error) {
// It returns the execution data as well as the blobs bundle.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
defer span.End()
start := time.Now()
@@ -232,23 +255,42 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()

if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, false, err
}
return ed, result.BlobsBundle, result.ShouldOverrideBuilder, nil
}

if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
return nil, nil, false, handleRPCError(err)
}

v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value))
return blocks.WrappedExecutionPayloadCapella(result.Payload, math.WeiToGwei(v))
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, false, err
}
return ed, nil, false, nil
}

result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
return nil, nil, false, handleRPCError(err)
}
return blocks.WrappedExecutionPayload(result)
ed, err := blocks.WrappedExecutionPayload(result)
if err != nil {
return nil, nil, false, err
}
return ed, nil, false, nil
}
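GetPayload now returns four values, so Deneb-aware callers can pick up the blobs bundle and the builder-override hint in one round trip. A rough usage sketch follows; the function name and surrounding flow are assumptions for illustration, not code from this change:

func fetchLocalPayload(ctx context.Context, engine EngineCaller, payloadID [8]byte, slot primitives.Slot) error {
	payload, bundle, overrideBuilder, err := engine.GetPayload(ctx, payloadID, slot)
	if err != nil {
		return err
	}
	if overrideBuilder {
		// The execution client recommends proposing this local payload
		// instead of accepting a relay/builder bid.
	}
	if bundle != nil {
		// Pre-Deneb responses leave the bundle nil; Deneb responses carry the
		// blobs, commitments, and proofs needed to construct blob sidecars.
	}
	_ = payload
	return nil
}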
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
@@ -546,7 +588,10 @@ func (s *Service) ReconstructFullBlock(
// If the payload header has a block hash of 0x0, it means we are pre-merge and should
// simply return the block with an empty execution payload.
if bytes.Equal(header.BlockHash(), params.BeaconConfig().ZeroHash[:]) {
payload := buildEmptyExecutionPayload()
payload, err := buildEmptyExecutionPayload(blindedBlock.Version())
if err != nil {
return nil, err
}
return blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlock, payload)
}

@@ -605,7 +650,11 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
// For blocks that are pre-merge we simply reconstruct them via an empty
// execution payload.
for _, realIdx := range zeroExecPayloads {
payload := buildEmptyExecutionPayload()
bblock := blindedBlocks[realIdx]
payload, err := buildEmptyExecutionPayload(bblock.Version())
if err != nil {
return nil, err
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload)
if err != nil {
return nil, err
@@ -641,7 +690,7 @@ func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executio
}

executionBlock.Version = version
return fullPayloadFromExecutionBlock(header, executionBlock)
return fullPayloadFromExecutionBlock(version, header, executionBlock)
}

func (s *Service) retrievePayloadsFromExecutionHashes(
@@ -669,6 +718,7 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
// blinded block.
for sliceIdx, realIdx := range validExecPayloads {
var payload interfaces.ExecutionData
bblock := blindedBlocks[realIdx]
if features.Get().EnableOptionalEngineMethods {
b := payloadBodies[sliceIdx]
if b == nil {
@@ -691,7 +741,7 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
if err != nil {
return nil, err
}
payload, err = fullPayloadFromExecutionBlock(header, b)
payload, err = fullPayloadFromExecutionBlock(bblock.Version(), header, b)
if err != nil {
return nil, err
}
@@ -706,7 +756,7 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
}

func fullPayloadFromExecutionBlock(
header interfaces.ExecutionData, block *pb.ExecutionBlock,
blockVersion int, header interfaces.ExecutionData, block *pb.ExecutionBlock,
) (interfaces.ExecutionData, error) {
if header.IsNil() || block == nil {
return nil, errors.New("execution block and header cannot be nil")
@@ -729,7 +779,8 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}

if block.Version == version.Bellatrix {
switch blockVersion {
case version.Bellatrix:
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -746,24 +797,56 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
})
case version.Capella:
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from execution payload header")
}
bgu, err := header.BlobGasUsed()
if err != nil {
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from execution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}

func fullPayloadFromPayloadBody(
@@ -773,7 +856,8 @@ func fullPayloadFromPayloadBody(
return nil, errors.New("execution block and header cannot be nil")
}

if bVersion == version.Bellatrix {
switch bVersion {
case version.Bellatrix:
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -790,24 +874,56 @@ func fullPayloadFromPayloadBody(
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
})
case version.Capella:
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from execution payload header")
}
bgu, err := header.BlobGasUsed()
if err != nil {
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from execution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}

// Handles errors received from the RPC server according to the specification.
|
||||
@@ -895,18 +1011,51 @@ func tDStringToUint256(td string) (*uint256.Int, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func buildEmptyExecutionPayload() *pb.ExecutionPayload {
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
func buildEmptyExecutionPayload(v int) (proto.Message, error) {
|
||||
switch v {
|
||||
case version.Bellatrix:
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
}, nil
|
||||
case version.Capella:
|
||||
return &pb.ExecutionPayloadCapella{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
}, nil
|
||||
case version.Deneb:
|
||||
return &pb.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))
|
||||
}
|
||||
}
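Illustrative sketch (not part of the diff): one way a caller might use the version-aware buildEmptyExecutionPayload above; the surrounding error handling and variable names are assumptions.

// Hypothetical caller: construct an empty payload for the fork the node is currently on.
msg, err := buildEmptyExecutionPayload(version.Deneb)
if err != nil {
	return errors.Wrap(err, "could not build empty execution payload")
}
emptyPayload, ok := msg.(*pb.ExecutionPayloadDeneb)
if !ok {
	return errors.New("unexpected payload type for Deneb")
}
_ = emptyPayload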

@@ -75,8 +75,9 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, 1)
resp, _, override, err := srv.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
require.Equal(t, false, override)
resPb, err := resp.PbBellatrix()
require.NoError(t, err)
require.DeepEqual(t, want, resPb)
@@ -85,8 +86,9 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, override, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, false, override)
resPb, err := resp.PbCapella()
require.NoError(t, err)
require.DeepEqual(t, want, resPb)
@@ -118,7 +120,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -129,7 +131,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -163,6 +165,7 @@ func TestClient_HTTP(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
params.OverrideBeaconConfig(cfg)

t.Run(GetPayloadMethod, func(t *testing.T) {
@@ -203,8 +206,9 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, 1)
resp, _, override, err := client.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
require.Equal(t, false, override)
pb, err := resp.PbBellatrix()
require.NoError(t, err)
require.DeepEqual(t, want, pb)
@@ -247,8 +251,9 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, override, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, false, override)
pb, err := resp.PbCapella()
require.NoError(t, err)
require.DeepEqual(t, want.ExecutionPayload.BlockHash.Bytes(), pb.BlockHash)
@@ -262,6 +267,61 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1236), v)
})
t.Run(GetPayloadMethodV3, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayloadDenebWithValue"].(*pb.GetPayloadV3ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Service{}
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, blobsBundle, override, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, true, override)
g, err := resp.ExcessBlobGas()
require.NoError(t, err)
require.DeepEqual(t, uint64(3), g)
g, err = resp.BlobGasUsed()
require.NoError(t, err)
require.DeepEqual(t, uint64(2), g)

commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, blobsBundle.KzgCommitments)
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, blobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, blobsBundle.Blobs)
})
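Illustrative sketch (not from the diff): how a Deneb-aware caller might consume the four-value GetPayload return exercised by the test above; all surrounding names are assumptions.

// payload, blobs bundle, builder-override flag, error.
ed, bundle, override, err := client.GetPayload(ctx, payloadId, slot)
if err != nil {
	return err
}
if override {
	// The engine suggests using the local payload instead of a builder bid.
}
if bundle != nil {
	// One KZG commitment and proof per blob is expected in the bundle.
	for i := range bundle.Blobs {
		_, _ = bundle.KzgCommitments[i], bundle.Proofs[i]
	}
}
_ = ed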
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
@@ -412,7 +472,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -426,7 +486,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -440,7 +514,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -454,7 +528,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" SYNCING status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -468,7 +556,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -482,7 +570,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" INVALID_BLOCK_HASH status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -496,7 +598,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -510,7 +612,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -524,7 +640,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -1306,6 +1422,25 @@ func fixtures() map[string]interface{} {
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
}
executionPayloadFixtureDeneb := &pb.ExecutionPayloadDeneb{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
PrevRandao: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
BlobGasUsed: 2,
ExcessBlobGas: 3,
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
@@ -1326,6 +1461,35 @@ func fixtures() map[string]interface{} {
},
BlockValue: "0x11fffffffff",
}
bgu := hexutil.Uint64(2)
ebg := hexutil.Uint64(3)
executionPayloadWithValueFixtureDeneb := &pb.GetPayloadV3ResponseJson{
ShouldOverrideBuilder: true,
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
BlobGasUsed: &bgu,
ExcessBlobGas: &ebg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleJSON{
Commitments: []hexutil.Bytes{[]byte("commitment1"), []byte("commitment2")},
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
@@ -1421,7 +1585,9 @@ func fixtures() map[string]interface{} {
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
"ExecutionPayloadDeneb": executionPayloadFixtureDeneb,
"ExecutionPayloadCapellaWithValue": executionPayloadWithValueFixtureCapella,
"ExecutionPayloadDenebWithValue": executionPayloadWithValueFixtureDeneb,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
@@ -1438,8 +1604,9 @@ func fixtures() map[string]interface{} {

func Test_fullPayloadFromExecutionBlock(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeader
block *pb.ExecutionBlock
header *pb.ExecutionPayloadHeader
block *pb.ExecutionBlock
version int
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
@@ -1455,9 +1622,9 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Bellatrix,
Hash: common.BytesToHash([]byte("bar")),
Hash: common.BytesToHash([]byte("bar")),
},
version: version.Bellatrix,
},
err: "does not match execution block hash",
},
@@ -1468,9 +1635,9 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Bellatrix,
Hash: wantedHash,
Hash: wantedHash,
},
version: version.Bellatrix,
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
@@ -1486,7 +1653,129 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeader(tt.args.header)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}

func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderCapella
block *pb.ExecutionBlock
version int
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Hash: common.BytesToHash([]byte("bar")),
},
version: version.Capella,
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Hash: wantedHash,
},
version: version.Capella,
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}

func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderDeneb
block *pb.ExecutionBlock
version int
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Hash: common.BytesToHash([]byte("bar")),
},
version: version.Deneb,
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Hash: wantedHash,
},
version: version.Deneb,
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
@@ -1819,6 +2108,40 @@ func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
return service
}

func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadDeneb) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(payload)
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": status,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)

service := &Service{}
service.rpcClient = rpcClient
return service
}

func TestCapella_PayloadBodiesByHash(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,

@@ -36,4 +36,6 @@ var (
ErrNilResponse = errors.New("nil response")
// ErrRequestTooLarge when the request is too large
ErrRequestTooLarge = errors.New("request too large")
// ErrUnsupportedVersion represents a case where a payload is requested for a block type that doesn't have a known mapping.
ErrUnsupportedVersion = errors.New("unknown ExecutionPayload schema for block version")
)

@@ -23,6 +23,7 @@ go_library(
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",

@@ -14,6 +14,7 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -25,6 +26,7 @@ type EngineClient struct {
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
ExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
@@ -37,12 +39,14 @@ type EngineClient struct {
NumReconstructedPayloads uint64
TerminalBlockHash []byte
TerminalBlockHashExists bool
BuilderOverride bool
OverrideValidHash [32]byte
BlockValue uint64
BlobsBundle *pb.BlobsBundle
}

// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData) ([]byte, error) {
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash) ([]byte, error) {
return e.NewPayloadResp, e.ErrNewPayload
}
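Illustrative sketch (an assumption, not part of the diff): the two new NewPayload arguments appear to carry the blob versioned hashes and the parent beacon block root used by engine_newPayloadV3; per the Deneb spec a versioned hash is the SHA-256 of the KZG commitment with the first byte replaced by 0x01.

// kzgCommitmentToVersionedHash is a hypothetical helper for building the []common.Hash argument.
func kzgCommitmentToVersionedHash(commitment []byte) common.Hash {
	h := sha256.Sum256(commitment) // crypto/sha256
	h[0] = 0x01                    // VERSIONED_HASH_VERSION_KZG
	return common.Hash(h)
}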

@@ -57,15 +61,26 @@ func (e *EngineClient) ForkchoiceUpdated(
}

// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, error) {
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, false, err
}
return ed, e.BlobsBundle, e.BuilderOverride, nil
}
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, e.BlockValue)
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, false, err
}
return ed, nil, e.BuilderOverride, nil
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
return nil, err
return nil, nil, false, err
}
return p, e.ErrGetPayload
return p, nil, e.BuilderOverride, e.ErrGetPayload
}

// ExchangeTransitionConfiguration --

@@ -90,7 +90,7 @@ func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slo
}
continue
}
if err = blocks.VerifyExitAndSignature(validator, state.Slot(), state.Fork(), exit, state.GenesisValidatorsRoot()); err != nil {
if err = blocks.VerifyExitAndSignature(validator, state, exit); err != nil {
logrus.WithError(err).Warning("removing invalid exit from pool")
p.lock.RUnlock()
// MarkIncluded removes the invalid exit from the pool

@@ -202,6 +202,67 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
}
}

// BroadcastBlob broadcasts a blob to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input subnet.
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
if blob == nil {
return errors.New("attempted to broadcast nil blob sidecar")
}
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastBlob(ctx, subnet, blob, forkDigest)

return nil
}

func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()

wrappedSubIdx := subnet + blobSubnetLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
s.subnetLocker(wrappedSubIdx).RUnlock()

if !hasPeer {
blobSidecarCommitteeBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, blobSubnetToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
}
if ok {
blobSidecarCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
}
}

if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}
}

// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -238,3 +299,7 @@ func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}
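Illustrative sketch (not from the diff): a hypothetical caller of BroadcastBlob; sidecar and blobSubnetCount are placeholders, since the real subnet count comes from configuration rather than a literal.

const blobSubnetCount = 6 // assumed value for illustration only
subnet := uint64(sidecar.Message.Index) % blobSubnetCount
if err := s.BroadcastBlob(ctx, subnet, sidecar); err != nil {
	log.WithError(err).Error("Could not broadcast blob sidecar")
}
// blobSubnetToTopic(subnet, forkDigest) then yields the gossip topic the message is published on.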

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -447,3 +448,76 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
t.Error("Failed to receive pubsub within 1s")
}
}

func TestService_BroadcastBlob(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")

p := &Service{
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}

blobSidecar := &ethpb.SignedBlobSidecar{
Message: &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
Index: 1,
Slot: 2,
BlockParentRoot: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength),
ProposerIndex: 3,
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
},
Signature: bytesutil.PadTo([]byte{'F'}, fieldparams.BLSSignatureLength),
}
subnet := uint64(0)

topic := BlobSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(blobSidecar)] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)

time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...

// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)

result := &ethpb.SignedBlobSidecar{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
require.DeepEqual(t, result, blobSidecar)
}(t)

// Attempt to broadcast nil object should fail.
ctx := context.Background()
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastBlob(ctx, subnet, nil))

// Broadcast to peers and wait.
require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

@@ -17,7 +17,8 @@ func (s *Service) forkWatcher() {
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch {
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. This is repeated over the
// epoch, which might be slightly wasteful

@@ -121,6 +121,9 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultAttesterSlashingTopicParams(), nil
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
return defaultBlsToExecutionChangeTopicParams(), nil
case strings.Contains(topic, GossipBlobSidecarMessage):
// TODO(Deneb): Using the default block scoring. But this should be updated.
return defaultBlockTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}

@@ -21,12 +21,16 @@ var gossipTopicMappings = map[string]proto.Message{
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.SignedBlobSidecar{},
}

// GossipTopicMappings is a function to return the assigned data type
// versioned by epoch.
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if topic == BlockSubnetTopicFormat {
if epoch >= params.BeaconConfig().DenebForkEpoch {
return &ethpb.SignedBeaconBlockDeneb{}
}
if epoch >= params.BeaconConfig().CapellaForkEpoch {
return &ethpb.SignedBeaconBlockCapella{}
}
@@ -62,6 +66,8 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
// Specially handle Bellatrix objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
// Specially handle Capella objects
// Specially handle Capella objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockCapella{})] = BlockSubnetTopicFormat
// Specially handle Deneb objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
}
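Illustrative sketch (not from the diff): resolving the expected block message type for an epoch, mirroring the fork checks in GossipTopicMappings above.

msg := GossipTopicMappings(BlockSubnetTopicFormat, params.BeaconConfig().DenebForkEpoch)
if _, ok := msg.(*ethpb.SignedBeaconBlockDeneb); !ok {
	// At or after the Deneb fork epoch the Deneb block type is expected on the block topic.
}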

@@ -35,6 +35,7 @@ type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.

@@ -60,10 +60,18 @@ var (
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
"until a subnet peer can be found.",
})
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_broadcasts",
Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
})
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
Help: "The number of sync committee that were attempted to be broadcast.",
})
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
})

// Gossip Tracer Metrics
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{

@@ -22,6 +22,8 @@ func TestMain(m *testing.M) {
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,
BlockBatchLimitBurstFactor: 10,
BlobBatchLimit: 8,
BlobBatchLimitBurstFactor: 2,
})
defer func() {
flags.Init(resetFlags)

@@ -57,12 +57,17 @@ func (s *Service) CanSubscribe(topic string) bool {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}

denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
default:
return false
}

@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}

// Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

@@ -37,6 +37,12 @@ const PingMessageName = "/ping"
// MetadataMessageName specifies the name for the metadata message topic.
const MetadataMessageName = "/metadata"

// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"

// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"

const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -52,6 +58,14 @@ const (
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1

// RPCBlobSidecarsByRangeTopicV1 is a topic for requesting blob sidecars
// in the slot range [start_slot, start_slot + count), leading up to the current head block as selected by fork choice.
// Protocol ID: /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
RPCBlobSidecarsByRangeTopicV1 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV1
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1

// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
@@ -83,6 +97,10 @@ var RPCTopicMappings = map[string]interface{}{
// RPC Metadata Message
RPCMetaDataTopicV1: new(interface{}),
RPCMetaDataTopicV2: new(interface{}),
// BlobSidecarsByRange v1 Message
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
// BlobSidecarsByRoot v1 Message
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
}
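Illustrative note (the value of protocolPrefix is not shown in this excerpt and is assumed to be the usual /eth2/beacon_chain/req): the new topics then expand roughly to /eth2/beacon_chain/req/blob_sidecars_by_range/1 and /eth2/beacon_chain/req/blob_sidecars_by_root/1. A minimal sanity check against the mapping registered above:

if _, ok := RPCTopicMappings[RPCBlobSidecarsByRangeTopicV1]; !ok {
	// Every registered blob topic must map to its request message type.
}
if _, ok := RPCTopicMappings[RPCBlobSidecarsByRootTopicV1]; !ok {
	// Same for the by-root variant.
}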

// Maps all registered protocol prefixes.
@@ -99,6 +117,8 @@ var messageMapping = map[string]bool{
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
BlobSidecarsByRangeName: true,
BlobSidecarsByRootName: true,
}

// Maps all the RPC messages which are to be updated in altair.
@@ -113,6 +133,19 @@ var versionMapping = map[string]bool{
SchemaVersionV2: true,
}

// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
// was introduced. For other RPC methods, context bytes are always required.
var OmitContextBytesV1 = map[string]bool{
StatusMessageName: true,
GoodbyeMessageName: true,
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
}
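Illustrative sketch (not from the diff): how a response writer might consult OmitContextBytesV1; writeContextBytes, stream, and topicBase are hypothetical names.

// topicBase is the bare message name, e.g. BlobSidecarsByRangeName.
if !OmitContextBytesV1[topicBase] {
	// v1 RPCs not listed above must prefix the payload with the 4-byte fork-digest context.
	if err := writeContextBytes(stream, forkDigest[:]); err != nil {
		return err
	}
}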

// VerifyTopicMapping verifies that the topic and its accompanying
// message type is correct.
func VerifyTopicMapping(topic string, msg interface{}) error {

@@ -31,6 +31,13 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
// chosen as more than 64(attestation subnet count).
const syncLockerVal = 100

// The value used with the blob sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// blob subnets from others. It is deliberately chosen
// to be larger than the sync and attestation subnet values combined.
const blobSubnetLockerVal = 110
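Illustrative note (mirrors the use in broadcastBlob above): the offset keeps blob-subnet lock keys clear of the attestation (0-63) and sync-committee (100+) key ranges.

wrappedSubIdx := subnet + blobSubnetLockerVal // e.g. blob subnet 3 locks under key 113
s.subnetLocker(wrappedSubIdx).RLock()
defer s.subnetLocker(wrappedSubIdx).RUnlock()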

// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required amount of

@@ -143,6 +143,11 @@ func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
return nil
}

// BroadcastBlob -- fake.
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
return nil
}

// InterceptPeerDial -- fake.
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true

@@ -33,3 +33,9 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
m.BroadcastCalled = true
return nil
}

// BroadcastBlob broadcasts a blob for mock.
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
m.BroadcastCalled = true
return nil
}

@@ -176,6 +176,12 @@ func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
return nil
}

// BroadcastBlob broadcasts a blob for mock.
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
p.BroadcastCalled = true
return nil
}

// SetStreamHandler for RPC.
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
p.BHost.SetStreamHandler(protocol.ID(topic), handler)

@@ -28,7 +28,8 @@ const (
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"

// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"
// Topic Formats
//
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -49,4 +50,6 @@ const (
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
)

@@ -41,7 +41,9 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
],
)

@@ -53,6 +53,11 @@ func InitializeDataMaps() {
&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}},
)
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
return blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}},
)
},
}

// Reset our metadata map.
@@ -69,5 +74,8 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
}
}

@@ -12,4 +12,6 @@ var (
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
)
@@ -4,9 +4,13 @@
package types

import (
"bytes"
"sort"

"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/config/params"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

const rootLength = 32
@@ -64,7 +68,7 @@ func (r *BeaconBlockByRootsReq) UnmarshalSSZ(buf []byte) error {
bufLen := len(buf)
maxLength := int(params.BeaconNetworkConfig().MaxRequestBlocks * rootLength)
if bufLen > maxLength {
return errors.Errorf("expected buffer with length of upto %d but received length %d", maxLength, bufLen)
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLength, bufLen)
}
if bufLen%rootLength != 0 {
return ssz.ErrIncorrectByteSize
@@ -120,3 +124,90 @@ func (m *ErrorMessage) UnmarshalSSZ(buf []byte) error {
*m = errMsg
return nil
}

// BlobSidecarsByRootReq is used to specify a list of blob targets (root+index) in a BlobSidecarsByRoot RPC request.
type BlobSidecarsByRootReq []*eth.BlobIdentifier

// BlobIdentifier is a fixed size value, so we can compute its fixed size at start time (see init below)
var blobIdSize int

// SizeSSZ returns the size of the serialized representation.
func (b *BlobSidecarsByRootReq) SizeSSZ() int {
return len(*b) * blobIdSize
}

// MarshalSSZTo appends the serialized BlobSidecarsByRootReq value to the provided byte slice.
func (b *BlobSidecarsByRootReq) MarshalSSZTo(dst []byte) ([]byte, error) {
// A List without an enclosing container is marshaled exactly like a vector, no length offset required.
marshalledObj, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalledObj...), nil
}

// MarshalSSZ serializes the BlobSidecarsByRootReq value to a byte slice.
func (b *BlobSidecarsByRootReq) MarshalSSZ() ([]byte, error) {
buf := make([]byte, len(*b)*blobIdSize)
for i, id := range *b {
by, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
copy(buf[i*blobIdSize:(i+1)*blobIdSize], by)
}
return buf, nil
}

// UnmarshalSSZ unmarshals the provided bytes buffer into the
// BlobSidecarsByRootReq value.
func (b *BlobSidecarsByRootReq) UnmarshalSSZ(buf []byte) error {
bufLen := len(buf)
maxLength := int(params.BeaconNetworkConfig().MaxRequestBlobSidecars) * blobIdSize
if bufLen > maxLength {
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLength, bufLen)
}
if bufLen%blobIdSize != 0 {
return errors.Wrapf(ssz.ErrIncorrectByteSize, "size=%d", bufLen)
}
count := bufLen / blobIdSize
*b = make([]*eth.BlobIdentifier, count)
for i := 0; i < count; i++ {
id := &eth.BlobIdentifier{}
err := id.UnmarshalSSZ(buf[i*blobIdSize : (i+1)*blobIdSize])
if err != nil {
return err
}
(*b)[i] = id
}
return nil
}
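Illustrative sketch (not from the diff): SSZ round-tripping a BlobSidecarsByRootReq; since the list has no enclosing container, the encoding is just the concatenation of fixed-size identifiers.

req := BlobSidecarsByRootReq{
	&eth.BlobIdentifier{BlockRoot: make([]byte, 32), Index: 0},
	&eth.BlobIdentifier{BlockRoot: make([]byte, 32), Index: 1},
}
enc, err := req.MarshalSSZ() // len(enc) == 2 * blobIdSize
if err != nil {
	return err
}
var decoded BlobSidecarsByRootReq
if err := decoded.UnmarshalSSZ(enc); err != nil {
	return err
}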

var _ sort.Interface = BlobSidecarsByRootReq{}

// Less reports whether the element with index i must sort before the element with index j.
// BlobIdentifier will be sorted in lexicographic order by root, with Blob Index as tiebreaker for a given root.
func (s BlobSidecarsByRootReq) Less(i, j int) bool {
rootCmp := bytes.Compare(s[i].BlockRoot, s[j].BlockRoot)
if rootCmp != 0 {
// They aren't equal; return true if i < j, false if i > j.
return rootCmp < 0
}
// They are equal; blob index is the tie breaker.
return s[i].Index < s[j].Index
}

// Swap swaps the elements with indexes i and j.
func (s BlobSidecarsByRootReq) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

// Len is the number of elements in the collection.
func (s BlobSidecarsByRootReq) Len() int {
return len(s)
}

func init() {
sizer := &eth.BlobIdentifier{}
blobIdSize = sizer.SizeSSZ()
}
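Illustrative sketch (not from the diff): because BlobSidecarsByRootReq satisfies sort.Interface, a request can be put into the canonical (root, then index) order before it is sent.

// Uses the standard library sort package.
sort.Sort(req) // req is a BlobSidecarsByRootReq; ordering follows Less above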
|
||||
|
||||
@@ -4,12 +4,82 @@ import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func generateBlobIdentifiers(n int) []*eth.BlobIdentifier {
|
||||
r := make([]*eth.BlobIdentifier, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r[i] = ð.BlobIdentifier{
|
||||
BlockRoot: bytesutil.PadTo([]byte{byte(i)}, 32),
|
||||
Index: 0,
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestBlobSidecarsByRootReq_MarshalSSZ(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
ids []*eth.BlobIdentifier
|
||||
marshalErr error
|
||||
unmarshalErr error
|
||||
unmarshalMod func([]byte) []byte
|
||||
}{
|
||||
{
|
||||
name: "empty list",
|
||||
},
|
||||
{
|
||||
name: "single item list",
|
||||
ids: generateBlobIdentifiers(1),
|
||||
},
|
||||
{
|
||||
name: "10 item list",
|
||||
ids: generateBlobIdentifiers(10),
|
||||
},
|
||||
{
|
||||
name: "wonky unmarshal size",
|
||||
ids: generateBlobIdentifiers(10),
|
||||
unmarshalMod: func(in []byte) []byte {
|
||||
in = append(in, byte(0))
|
||||
return in
|
||||
},
|
||||
unmarshalErr: ssz.ErrIncorrectByteSize,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
r := BlobSidecarsByRootReq(c.ids)
|
||||
by, err := r.MarshalSSZ()
|
||||
if c.marshalErr != nil {
|
||||
require.ErrorIs(t, err, c.marshalErr)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
if c.unmarshalMod != nil {
|
||||
by = c.unmarshalMod(by)
|
||||
}
|
||||
got := &BlobSidecarsByRootReq{}
|
||||
err = got.UnmarshalSSZ(by)
|
||||
if c.unmarshalErr != nil {
|
||||
require.ErrorIs(t, err, c.unmarshalErr)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
for i, gid := range *got {
|
||||
require.DeepEqual(t, c.ids[i], gid)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconBlockByRootsReq_Limit(t *testing.T) {
	fixedRoots := make([][32]byte, 0)
	for i := uint64(0); i < params.BeaconNetworkConfig().MaxRequestBlocks+100; i++ {
@@ -25,7 +95,7 @@ func TestBeaconBlockByRootsReq_Limit(t *testing.T) {
		buf = append(buf, rt[:]...)
	}
	req2 := BeaconBlockByRootsReq(nil)
	require.ErrorContains(t, "expected buffer with length of upto", req2.UnmarshalSSZ(buf))
	require.ErrorContains(t, "expected buffer with length of up to", req2.UnmarshalSSZ(buf))
}

func TestErrorResponse_Limit(t *testing.T) {
@@ -26,6 +26,7 @@ go_library(
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/rpc/eth/beacon:go_default_library",
        "//beacon-chain/rpc/eth/blob:go_default_library",
        "//beacon-chain/rpc/eth/builder:go_default_library",
        "//beacon-chain/rpc/eth/debug:go_default_library",
        "//beacon-chain/rpc/eth/events:go_default_library",

@@ -19,6 +19,7 @@ go_library(
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//network/http:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -8,7 +8,6 @@ import (
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"strings"

@@ -16,13 +15,11 @@ import (
	"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
	"github.com/prysmaticlabs/prysm/v4/api/grpc"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
	http2 "github.com/prysmaticlabs/prysm/v4/network/http"
	"github.com/prysmaticlabs/prysm/v4/runtime/version"
	"github.com/r3labs/sse/v2"
)

// match a number with optional decimals
var priorityRegex = regexp.MustCompile(`q=(\d+(?:\.\d+)?)`)

type sszConfig struct {
	fileName string
	responseJson SszResponse
@@ -114,7 +111,7 @@ func handleGetSSZ(
	req *http.Request,
	config sszConfig,
) (handled bool) {
	ssz, err := sszRequested(req)
	ssz, err := http2.SszRequested(req)
	if err != nil {
		apimiddleware.WriteError(w, apimiddleware.InternalServerError(err), nil)
		return true
@@ -207,44 +204,6 @@ func handlePostSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.E
	return true
}

func sszRequested(req *http.Request) (bool, error) {
	accept := req.Header.Values("Accept")
	if len(accept) == 0 {
		return false, nil
	}
	types := strings.Split(accept[0], ",")
	currentType, currentPriority := "", 0.0
	for _, t := range types {
		values := strings.Split(t, ";")
		name := values[0]
		if name != api.JsonMediaType && name != api.OctetStreamMediaType {
			continue
		}
		// no params specified
		if len(values) == 1 {
			priority := 1.0
			if priority > currentPriority {
				currentType, currentPriority = name, priority
			}
			continue
		}
		params := values[1]
		match := priorityRegex.FindAllStringSubmatch(params, 1)
		if len(match) != 1 {
			continue
		}
		priority, err := strconv.ParseFloat(match[0][1], 32)
		if err != nil {
			return false, err
		}
		if priority > currentPriority {
			currentType, currentPriority = name, priority
		}
	}

	return currentType == api.OctetStreamMediaType, nil
}

func sszPosted(req *http.Request) bool {
	ct, ok := req.Header["Content-Type"]
	if !ok {
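The sszRequested helper deleted above implements standard Accept-header negotiation with q-values; per this diff it is replaced by http2.SszRequested from the network/http package. A minimal usage sketch, assuming the new helper keeps the (req *http.Request) (bool, error) shape shown in handleGetSSZ; the request path is a made-up placeholder, and the literal media types are assumed to match api.OctetStreamMediaType and api.JsonMediaType:

package main

import (
	"fmt"
	"net/http/httptest"

	http2 "github.com/prysmaticlabs/prysm/v4/network/http"
)

func main() {
	// The client prefers SSZ (q=0.95) over JSON (q=0.9), so SSZ should win.
	req := httptest.NewRequest("GET", "http://example.test/eth/v2/debug/beacon/states/head", nil)
	req.Header.Set("Accept", "application/octet-stream;q=0.95,application/json;q=0.9")

	wantsSSZ, err := http2.SszRequested(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(wantsSSZ) // expected: true, mirroring the removed ssz_content_type_preferred_2 test case
}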
@@ -4,7 +4,6 @@ import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
@@ -43,95 +42,6 @@ func (t testSSZResponseJson) SSZFinalized() bool {
	return t.Finalized
}

func TestSSZRequested(t *testing.T) {
	t.Run("ssz_requested", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{api.OctetStreamMediaType}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, true, result)
	})

	t.Run("ssz_content_type_first", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", api.OctetStreamMediaType, api.JsonMediaType)}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, true, result)
	})

	t.Run("ssz_content_type_preferred_1", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", api.JsonMediaType, api.OctetStreamMediaType)}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, true, result)
	})

	t.Run("ssz_content_type_preferred_2", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", api.OctetStreamMediaType, api.JsonMediaType)}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, true, result)
	})

	t.Run("other_content_type_preferred", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", api.JsonMediaType, api.OctetStreamMediaType)}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})

	t.Run("other_params", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", api.JsonMediaType, api.OctetStreamMediaType)}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})

	t.Run("no_header", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})

	t.Run("empty_header", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})

	t.Run("empty_header_value", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{""}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})

	t.Run("other_content_type", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{"application/other"}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})

	t.Run("garbage", func(t *testing.T) {
		request := httptest.NewRequest("GET", "http://foo.example", nil)
		request.Header["Accept"] = []string{"This is Sparta!!!"}
		result, err := sszRequested(request)
		require.NoError(t, err)
		assert.Equal(t, false, result)
	})
}

func TestPrepareSSZRequestForProxying(t *testing.T) {
	middleware := &apimiddleware.ApiProxyMiddleware{
		GatewayAddress: "http://apimiddleware.example",
@@ -108,33 +108,35 @@ func wrapSyncCommitteeSignaturesArray(
}

type phase0PublishBlockRequestJson struct {
	Phase0Block *BeaconBlockJson `json:"phase0_block"`
	Signature string `json:"signature" hex:"true"`
	Phase0Block *SignedBeaconBlockJson `json:"phase0_block"`
}

type altairPublishBlockRequestJson struct {
	AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
	Signature string `json:"signature" hex:"true"`
	AltairBlock *SignedBeaconBlockAltairJson `json:"altair_block"`
}

type bellatrixPublishBlockRequestJson struct {
	BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
	Signature string `json:"signature" hex:"true"`
}

type capellaPublishBlockRequestJson struct {
	CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
	Signature string `json:"signature" hex:"true"`
	BellatrixBlock *SignedBeaconBlockBellatrixJson `json:"bellatrix_block"`
}

type bellatrixPublishBlindedBlockRequestJson struct {
	BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
	Signature string `json:"signature" hex:"true"`
	BellatrixBlock *SignedBlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
}

type capellaPublishBlockRequestJson struct {
	CapellaBlock *SignedBeaconBlockCapellaJson `json:"capella_block"`
}

type capellaPublishBlindedBlockRequestJson struct {
	CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
	Signature string `json:"signature" hex:"true"`
	CapellaBlock *SignedBlindedBeaconBlockCapellaJson `json:"capella_block"`
}

type denebPublishBlockRequestJson struct {
	DenebContents *SignedBeaconBlockContentsDenebJson `json:"deneb_contents"`
}

type denebPublishBlindedBlockRequestJson struct {
	DenebContents *SignedBlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
}

// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
@@ -147,31 +149,50 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
	req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
	s := struct {
		Message struct {
			Slot string
		}
		Slot string
	}{}

	buf, err := io.ReadAll(req.Body)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
	}
	if err := json.Unmarshal(buf, &s); err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read slot from body")

	typeParseMap := make(map[string]json.RawMessage)
	if err := json.Unmarshal(buf, &typeParseMap); err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not parse object")
	}
	slot, err := strconv.ParseUint(s.Message.Slot, 10, 64)
	if val, ok := typeParseMap["message"]; ok {
		if err := json.Unmarshal(val, &s); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'message' ")
		}
	} else if val, ok := typeParseMap["signed_block"]; ok {
		temp := struct {
			Message struct {
				Slot string
			}
		}{}
		if err := json.Unmarshal(val, &temp); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'signed_block' ")
		}
		s.Slot = temp.Message.Slot
	} else {
		return false, &apimiddleware.DefaultErrorJson{Message: "could not parse slot from request", Code: http.StatusInternalServerError}
	}
	slot, err := strconv.ParseUint(s.Slot, 10, 64)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
	}
	currentEpoch := slots.ToEpoch(primitives.Slot(slot))
	if currentEpoch < params.BeaconConfig().AltairForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockContainerJson{}
		endpoint.PostRequest = &SignedBeaconBlockJson{}
	} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockAltairContainerJson{}
		endpoint.PostRequest = &SignedBeaconBlockAltairJson{}
	} else if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockBellatrixContainerJson{}
		endpoint.PostRequest = &SignedBeaconBlockBellatrixJson{}
	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockCapellaJson{}
	} else {
		endpoint.PostRequest = &SignedBeaconBlockCapellaContainerJson{}
		endpoint.PostRequest = &SignedBeaconBlockContentsDenebJson{}
	}
	req.Body = io.NopCloser(bytes.NewBuffer(buf))
	return true, nil
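The reworked setInitialPublishBlockPostRequest above first peeks at the top-level JSON keys (via a map[string]json.RawMessage) to find the slot under either "message" or "signed_block", then picks the fork-specific request struct from that slot's epoch using slots.ToEpoch and the params.BeaconConfig() fork epochs. A standalone sketch of the first step only, with a hypothetical slotFromBody helper standing in for the middleware code; the request body shape mirrors the two cases handled above:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
)

// slotFromBody peeks at the top-level keys of a publish-block body and returns
// the slot, accepting either a bare signed block ({"message": {...}}) or Deneb
// block contents ({"signed_block": {"message": {...}}, ...}), as in the
// middleware above.
func slotFromBody(body []byte) (uint64, error) {
	top := make(map[string]json.RawMessage)
	if err := json.Unmarshal(body, &top); err != nil {
		return 0, err
	}
	inner := struct {
		Slot string
	}{}
	if raw, ok := top["message"]; ok {
		// {"message": {"slot": "..."}, "signature": "..."}
		if err := json.Unmarshal(raw, &inner); err != nil {
			return 0, err
		}
	} else if raw, ok := top["signed_block"]; ok {
		// {"signed_block": {"message": {"slot": "..."}}, "signed_blob_sidecars": [...]}
		wrapper := struct {
			Message struct {
				Slot string
			}
		}{}
		if err := json.Unmarshal(raw, &wrapper); err != nil {
			return 0, err
		}
		inner.Slot = wrapper.Message.Slot
	} else {
		return 0, errors.New("could not parse slot from request")
	}
	return strconv.ParseUint(inner.Slot, 10, 64)
}

func main() {
	body := []byte(`{"signed_block": {"message": {"slot": "123"}, "signature": "0x..."}}`)
	slot, err := slotFromBody(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(slot) // 123; the middleware then maps slot -> epoch -> fork-specific struct
}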
@@ -182,38 +203,42 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &phase0PublishBlockRequestJson{
			Phase0Block: block.Message,
			Signature: block.Signature,
			Phase0Block: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &altairPublishBlockRequestJson{
			AltairBlock: block.Message,
			Signature: block.Signature,
			AltairBlock: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockBellatrixContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockBellatrixJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &bellatrixPublishBlockRequestJson{
			BellatrixBlock: block.Message,
			Signature: block.Signature,
			BellatrixBlock: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockCapellaContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockCapellaJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &capellaPublishBlockRequestJson{
			CapellaBlock: block.Message,
			Signature: block.Signature,
			CapellaBlock: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContentsDenebJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &denebPublishBlockRequestJson{
			DenebContents: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
@@ -231,31 +256,50 @@ func setInitialPublishBlindedBlockPostRequest(endpoint *apimiddleware.Endpoint,
	req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
	s := struct {
		Message struct {
			Slot string
		}
		Slot string
	}{}

	buf, err := io.ReadAll(req.Body)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
	}
	if err := json.Unmarshal(buf, &s); err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read slot from body")

	typeParseMap := make(map[string]json.RawMessage)
	if err = json.Unmarshal(buf, &typeParseMap); err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not parse object")
	}
	slot, err := strconv.ParseUint(s.Message.Slot, 10, 64)
	if val, ok := typeParseMap["message"]; ok {
		if err = json.Unmarshal(val, &s); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'message' ")
		}
	} else if val, ok = typeParseMap["signed_blinded_block"]; ok {
		temp := struct {
			Message struct {
				Slot string
			}
		}{}
		if err = json.Unmarshal(val, &temp); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'signed_block' ")
		}
		s.Slot = temp.Message.Slot
	} else {
		return false, &apimiddleware.DefaultErrorJson{Message: "could not parse slot from request", Code: http.StatusInternalServerError}
	}
	slot, err := strconv.ParseUint(s.Slot, 10, 64)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
	}
	currentEpoch := slots.ToEpoch(primitives.Slot(slot))
	if currentEpoch < params.BeaconConfig().AltairForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockContainerJson{}
		endpoint.PostRequest = &SignedBeaconBlockJson{}
	} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockAltairContainerJson{}
		endpoint.PostRequest = &SignedBeaconBlockAltairJson{}
	} else if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
		endpoint.PostRequest = &SignedBlindedBeaconBlockBellatrixContainerJson{}
		endpoint.PostRequest = &SignedBlindedBeaconBlockBellatrixJson{}
	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
		endpoint.PostRequest = &SignedBlindedBeaconBlockCapellaJson{}
	} else {
		endpoint.PostRequest = &SignedBlindedBeaconBlockCapellaContainerJson{}
		endpoint.PostRequest = &SignedBlindedBeaconBlockContentsDenebJson{}
	}
	req.Body = io.NopCloser(bytes.NewBuffer(buf))
	return true, nil
@@ -266,38 +310,51 @@ func setInitialPublishBlindedBlockPostRequest(endpoint *apimiddleware.Endpoint,
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &phase0PublishBlockRequestJson{
			Phase0Block: block.Message,
			Signature: block.Signature,
			Phase0Block: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &altairPublishBlockRequestJson{
			AltairBlock: block.Message,
			Signature: block.Signature,
			AltairBlock: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockBellatrixContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockBellatrixJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &bellatrixPublishBlindedBlockRequestJson{
			BellatrixBlock: block.Message,
			Signature: block.Signature,
			BellatrixBlock: &SignedBlindedBeaconBlockBellatrixJson{
				Message: block.Message,
				Signature: block.Signature,
			},
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockCapellaContainerJson); ok {
	if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockCapellaJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &capellaPublishBlindedBlockRequestJson{
			CapellaBlock: block.Message,
			Signature: block.Signature,
			CapellaBlock: &SignedBlindedBeaconBlockCapellaJson{
				Message: block.Message,
				Signature: block.Signature,
			},
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if blockContents, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockContentsDenebJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &denebPublishBlindedBlockRequestJson{
			DenebContents: &SignedBlindedBeaconBlockContentsDenebJson{
				SignedBlindedBlock: blockContents.SignedBlindedBlock,
				SignedBlindedBlobSidecars: blockContents.SignedBlindedBlobSidecars,
			},
		}
		endpoint.PostRequest = actualPostReq
		return nil
@@ -343,45 +400,59 @@ func prepareValidatorAggregates(body []byte, responseContainer interface{}) (api
}

type phase0BlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBeaconBlockJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type altairBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBeaconBlockAltairJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type bellatrixBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBeaconBlockContainerJson `json:"data"`
	Data *SignedBeaconBlockBellatrixJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type altairBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBeaconBlockAltairContainerJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type bellatrixBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBeaconBlockBellatrixContainerJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type capellaBlockResponseJson struct {
	Version string `json:"version"`
	Data *SignedBeaconBlockCapellaContainerJson `json:"data"`
	Version string `json:"version"`
	Data *SignedBeaconBlockCapellaJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type denebBlockResponseJson struct {
	Version string `json:"version"`
	Data *SignedBeaconBlockDenebJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type bellatrixBlindedBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBlindedBeaconBlockBellatrixJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type bellatrixBlindedBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
type capellaBlindedBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBlindedBeaconBlockCapellaJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type capellaBlindedBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *SignedBlindedBeaconBlockCapellaContainerJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
type denebBlindedBlockResponseJson struct {
	Version string `json:"version"`
	Data *SignedBlindedBeaconBlockDenebJson `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
@@ -395,7 +466,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
		actualRespContainer = &phase0BlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockContainerJson{
			Data: &SignedBeaconBlockJson{
				Message: respContainer.Data.Phase0Block,
				Signature: respContainer.Data.Signature,
			},
@@ -405,7 +476,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
		actualRespContainer = &altairBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockAltairContainerJson{
			Data: &SignedBeaconBlockAltairJson{
				Message: respContainer.Data.AltairBlock,
				Signature: respContainer.Data.Signature,
			},
@@ -415,7 +486,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
		actualRespContainer = &bellatrixBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockBellatrixContainerJson{
			Data: &SignedBeaconBlockBellatrixJson{
				Message: respContainer.Data.BellatrixBlock,
				Signature: respContainer.Data.Signature,
			},
@@ -425,13 +496,23 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
		actualRespContainer = &capellaBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockCapellaContainerJson{
			Data: &SignedBeaconBlockCapellaJson{
				Message: respContainer.Data.CapellaBlock,
				Signature: respContainer.Data.Signature,
			},
			ExecutionOptimistic: respContainer.ExecutionOptimistic,
			Finalized: respContainer.Finalized,
		}
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
		actualRespContainer = &denebBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockDenebJson{
				Message: respContainer.Data.DenebBlock,
				Signature: respContainer.Data.Signature,
			},
			ExecutionOptimistic: respContainer.ExecutionOptimistic,
			Finalized: respContainer.Finalized,
		}
	default:
		return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
	}
@@ -454,7 +535,7 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
		actualRespContainer = &phase0BlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockContainerJson{
			Data: &SignedBeaconBlockJson{
				Message: respContainer.Data.Phase0Block,
				Signature: respContainer.Data.Signature,
			},
@@ -464,7 +545,7 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
		actualRespContainer = &altairBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBeaconBlockAltairContainerJson{
			Data: &SignedBeaconBlockAltairJson{
				Message: respContainer.Data.AltairBlock,
				Signature: respContainer.Data.Signature,
			},
@@ -474,7 +555,7 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
		actualRespContainer = &bellatrixBlindedBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBlindedBeaconBlockBellatrixContainerJson{
			Data: &SignedBlindedBeaconBlockBellatrixJson{
				Message: respContainer.Data.BellatrixBlock,
				Signature: respContainer.Data.Signature,
			},
@@ -484,13 +565,23 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
		actualRespContainer = &capellaBlindedBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBlindedBeaconBlockCapellaContainerJson{
			Data: &SignedBlindedBeaconBlockCapellaJson{
				Message: respContainer.Data.CapellaBlock,
				Signature: respContainer.Data.Signature,
			},
			ExecutionOptimistic: respContainer.ExecutionOptimistic,
			Finalized: respContainer.Finalized,
		}
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
		actualRespContainer = &denebBlindedBlockResponseJson{
			Version: respContainer.Version,
			Data: &SignedBlindedBeaconBlockDenebJson{
				Message: respContainer.Data.DenebBlock,
				Signature: respContainer.Data.Signature,
			},
			ExecutionOptimistic: respContainer.ExecutionOptimistic,
			Finalized: respContainer.Finalized,
		}
	default:
		return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
	}
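The serializer hunks above (serializeV2Block and serializeBlindedBlock) all follow the same pattern: compare the response's Version string, case-insensitively, against the lowered fork names from ethpbv2.Version_*.String(), and wrap Data in the matching fork-specific container, now with a Deneb branch added. A generic sketch of that pattern follows; versionedResponse and serializeByVersion are hypothetical stand-ins, not types or functions from the codebase.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// versionedResponse is a hypothetical stand-in for the fork-specific
// *ResponseJson containers used by the middleware above.
type versionedResponse struct {
	Version string      `json:"version"`
	Data    interface{} `json:"data"`
}

// serializeByVersion mirrors the switch pattern in serializeV2Block and
// serializeBlindedBlock: match the version case-insensitively and wrap the
// matching fork-specific payload; anything unknown is an error.
func serializeByVersion(version string, payloads map[string]interface{}) ([]byte, error) {
	for name, data := range payloads {
		if strings.EqualFold(version, name) {
			return json.Marshal(&versionedResponse{Version: version, Data: data})
		}
	}
	return nil, fmt.Errorf("unsupported block version '%s'", version)
}

func main() {
	out, err := serializeByVersion("deneb", map[string]interface{}{
		"capella": map[string]string{"slot": "2"},
		"deneb":   map[string]string{"slot": "3"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"version":"deneb","data":{"slot":"3"}}
}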
@@ -522,6 +613,11 @@ type capellaStateResponseJson struct {
	Data *BeaconStateCapellaJson `json:"data"`
}

type denebStateResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *BeaconStateDenebJson `json:"data"`
}

func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
	respContainer, ok := response.(*BeaconStateV2ResponseJson)
	if !ok {
@@ -550,6 +646,11 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
			Version: respContainer.Version,
			Data: respContainer.Data.CapellaState,
		}
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
		actualRespContainer = &denebStateResponseJson{
			Version: respContainer.Version,
			Data: respContainer.Data.DenebState,
		}
	default:
		return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
	}
@@ -581,6 +682,11 @@ type capellaProduceBlockResponseJson struct {
	Data *BeaconBlockCapellaJson `json:"data"`
}

type denebProduceBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *BeaconBlockContentsDenebJson `json:"data"`
}

type bellatrixProduceBlindedBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *BlindedBeaconBlockBellatrixJson `json:"data"`
@@ -591,6 +697,11 @@ type capellaProduceBlindedBlockResponseJson struct {
	Data *BlindedBeaconBlockCapellaJson `json:"data"`
}

type denebProduceBlindedBlockResponseJson struct {
	Version string `json:"version" enum:"true"`
	Data *BlindedBeaconBlockContentsDenebJson `json:"data"`
}

func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
	respContainer, ok := response.(*ProduceBlockResponseV2Json)
	if !ok {
@@ -619,6 +730,11 @@ func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, [
			Version: respContainer.Version,
			Data: respContainer.Data.CapellaBlock,
		}
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
		actualRespContainer = &denebProduceBlockResponseJson{
			Version: respContainer.Version,
			Data: respContainer.Data.DenebContents,
		}
	default:
		return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
	}
@@ -658,6 +774,11 @@ func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefau
			Version: respContainer.Version,
			Data: respContainer.Data.CapellaBlock,
		}
	case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
		actualRespContainer = &denebProduceBlindedBlockResponseJson{
			Version: respContainer.Version,
			Data: respContainer.Data.DenebContents,
		}
	default:
		return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
	}
Some files were not shown because too many files have changed in this diff.