Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: 48 commits (attestatio... to ignore_rid...)
Commits (48):
7a7daadf22, 4008ea736f, 4e4fb9ad52, 737e0e0d3a, 604c82626f, e1a3852f08,
a40cc40edf, 0e3c1d42f6, f41e603e5a, 28c3330375, cb465086e8, cdd7958739,
97a522827b, b84b795f23, f40b8583f7, a6b6a938de, c78d698d89, 705e98e3c3,
ce2344301c, 1112e01c06, 243bcb03ce, a0ca4a67b0, b68a4e12aa, c010601f3b,
394bd1786a, 461af4baa6, 7a70305935, 83ce7e3607, cf8e554981, 59aa978223,
e4a5711c8f, d8b38cf230, ca36634de6, 1c35b66132, 56c1f9aab5, 5ecb4d62a9,
bc107a61e3, 9f41375550, 6a638bd148, 52f1b3f958, 80526a1899, da2212f6cc,
7cc05401ca, cd8d499198, 2fbda536b0, 0498e0a4d5, 098d6a3c0b, 67d0b26a21
@@ -1 +1 @@
-6.3.2
+6.4.0
@@ -9,8 +9,8 @@
 #build:remote-cache --strategy=Genrule=standalone

 # Prysm specific remote-cache properties.
 #build:remote-cache --disk_cache=
-build:remote-cache --remote_download_toplevel
+build:remote-cache --remote_download_minimal
 build:remote-cache --experimental_remote_build_event_upload=minimal
 build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
 # Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
 #build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
@@ -29,7 +29,10 @@ build --experimental_use_hermetic_linux_sandbox
 # Import workspace options.
 import %workspace%/.bazelrc

-startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
+# Enable blake3 once it is supported in remote cache. See: https://github.com/buchgr/bazel-remote/issues/710
+# startup --digest_function=blake3
+
+startup --host_jvm_args=-Xmx8g --host_jvm_args=-Xms4g
 build --experimental_strict_action_env
 build --sandbox_tmpfs_path=/tmp
 build --verbose_failures
@@ -39,6 +42,7 @@ build --curses=no --color=no
 build --keep_going
 build --test_output=errors
 build --flaky_test_attempts=5
+build --build_runfile_links=false # Only build runfile symlink forest when required by local action, test, or run command.
 # Disabled race detection due to unstable test results under constrained environment build kite
 # build --features=race
.github/workflows/go.yml (vendored), 4 changes
@@ -5,6 +5,8 @@ on:
     branches: [ master ]
   pull_request:
     branches: [ '*' ]
+  merge_group:
+    types: [checks_requested]

 jobs:
   formatting:
@@ -34,7 +36,7 @@ jobs:
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
           go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
-          gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
+          gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

   lint:
     name: Lint
@@ -244,8 +244,8 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "91434d5fd5e1c6eb7b0174fed2afe25e09bddf00e1e4c431db931b2cee4e7773",
-    url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
+    sha256 = "516d551cfb3e50e4ac2f42db0992f4ceb573a7cb1616d727a725c8161485329f",
+    url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/refs/tags/v5.3.0.tar.gz",
 )

 http_archive(
@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library")

 go_library(
     name = "go_default_library",
-    srcs = ["headers.go"],
+    srcs = [
+        "constants.go",
+        "headers.go",
+    ],
     importpath = "github.com/prysmaticlabs/prysm/v4/api",
     visibility = ["//visibility:public"],
 )
@@ -12,10 +12,10 @@ go_library(
     deps = [
         "//api/client:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/rpc/apimiddleware:go_default_library",
         "//beacon-chain/rpc/eth/beacon:go_default_library",
         "//beacon-chain/rpc/eth/config:go_default_library",
         "//beacon-chain/rpc/eth/shared:go_default_library",
+        "//beacon-chain/rpc/prysm/beacon:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
@@ -16,10 +16,10 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v4/api/client"
-	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
+	apibeacon "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/beacon"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v4/network/forks"
@@ -31,7 +31,7 @@ const (
 	getSignedBlockPath = "/eth/v2/beacon/blocks"
 	getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
 	getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
-	getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
+	getWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
 	getForkSchedulePath = "/eth/v1/config/fork_schedule"
 	getConfigSpecPath = "/eth/v1/config/spec"
 	getStatePath = "/eth/v2/debug/beacon/states"
@@ -258,16 +258,16 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
 	if err != nil {
 		return nil, err
 	}
-	v := &apimiddleware.WeakSubjectivityResponse{}
+	v := &apibeacon.GetWeakSubjectivityResponse{}
 	err = json.Unmarshal(body, v)
 	if err != nil {
 		return nil, err
 	}
-	epoch, err := strconv.ParseUint(v.Data.Checkpoint.Epoch, 10, 64)
+	epoch, err := strconv.ParseUint(v.Data.WsCheckpoint.Epoch, 10, 64)
 	if err != nil {
 		return nil, err
 	}
-	blockRoot, err := hexutil.Decode(v.Data.Checkpoint.Root)
+	blockRoot, err := hexutil.Decode(v.Data.WsCheckpoint.Root)
 	if err != nil {
 		return nil, err
 	}
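For reference, the weak subjectivity data now comes from the Prysm-namespaced endpoint and is decoded through apibeacon.GetWeakSubjectivityResponse, whose checkpoint epoch is a decimal string and whose root is 0x-prefixed hex. Below is a minimal, self-contained Go sketch of that decoding path; the struct layout and JSON field names are assumptions inferred from the v.Data.WsCheckpoint.Epoch / .Root accesses above, not copied from the Prysm source.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// Assumed response shape, mirroring the v.Data.WsCheckpoint.{Epoch,Root} accesses above.
type wsCheckpoint struct {
	Epoch string `json:"epoch"`
	Root  string `json:"root"`
}

type wsResponse struct {
	Data struct {
		WsCheckpoint wsCheckpoint `json:"ws_checkpoint"`
	} `json:"data"`
}

func main() {
	body := []byte(`{"data":{"ws_checkpoint":{"epoch":"123","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}`)
	v := &wsResponse{}
	if err := json.Unmarshal(body, v); err != nil {
		panic(err)
	}
	// Epoch arrives as a decimal string, the root as 0x-prefixed hex.
	epoch, err := strconv.ParseUint(v.Data.WsCheckpoint.Epoch, 10, 64)
	if err != nil {
		panic(err)
	}
	root, err := hexutil.Decode(v.Data.WsCheckpoint.Root)
	if err != nil {
		panic(err)
	}
	fmt.Println(epoch, len(root)) // 123 32
}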
@@ -305,7 +305,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shar
 	if resp.StatusCode != http.StatusOK {
 		decoder := json.NewDecoder(resp.Body)
 		decoder.DisallowUnknownFields()
-		errorJson := &apimiddleware.IndexedVerificationFailureErrorJson{}
+		errorJson := &shared.IndexedVerificationFailureError{}
 		if err := decoder.Decode(errorJson); err != nil {
 			return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
 		}
@@ -20,8 +20,6 @@ go_library(
         "//encoding/bytesutil:go_default_library",
         "//math:go_default_library",
         "//monitoring/tracing:go_default_library",
-        "//network:go_default_library",
-        "//network/authorization:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
@@ -6,7 +6,6 @@ import (
 	consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
-	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/runtime/version"
 )
@@ -22,7 +21,7 @@ type SignedBid interface {
 // Bid is an interface describing the method set of a builder bid.
 type Bid interface {
 	Header() (interfaces.ExecutionData, error)
-	BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error)
+	BlobKzgCommitments() ([][]byte, error)
 	Value() []byte
 	Pubkey() []byte
 	Version() int
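The interface change above removes BlindedBlobsBundle from Bid and exposes the blob KZG commitments directly. A small, self-contained sketch of a consumer of the new method follows; the bid type here is a local stand-in rather than the Prysm implementation, and the 48-byte length check mirrors the assertions in the updated tests later in this diff.

package main

import "fmt"

// bid mirrors just the part of the Bid interface that changed: blob KZG
// commitments are now exposed directly instead of a blinded blobs bundle.
type bid interface {
	BlobKzgCommitments() ([][]byte, error)
}

type denebBid struct{ commitments [][]byte }

func (b denebBid) BlobKzgCommitments() ([][]byte, error) { return b.commitments, nil }

// checkCommitments applies the same sanity check the updated tests use:
// every KZG commitment is expected to be 48 bytes long.
func checkCommitments(b bid) error {
	cs, err := b.BlobKzgCommitments()
	if err != nil {
		return err
	}
	for i, c := range cs {
		if len(c) != 48 {
			return fmt.Errorf("commitment %d has length %d, want 48", i, len(c))
		}
	}
	return nil
}

func main() {
	fmt.Println(checkCommitments(denebBid{commitments: [][]byte{make([]byte, 48)}})) // <nil>
}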
@@ -115,9 +114,9 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
 	return blocks.WrappedExecutionPayloadHeader(b.p.Header)
 }

-// BlindedBlobsBundle --
-func (b builderBid) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
-	return nil, errors.New("blinded blobs bundle not available before Deneb")
+// BlobKzgCommitments --
+func (b builderBid) BlobKzgCommitments() ([][]byte, error) {
+	return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
 }

 // Version --
@@ -169,9 +168,9 @@ func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
 	return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
 }

-// BlindedBlobsBundle --
-func (b builderBidCapella) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
-	return nil, errors.New("blinded blobs bundle not available before Deneb")
+// BlobKzgCommitments --
+func (b builderBidCapella) BlobKzgCommitments() ([][]byte, error) {
+	return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
 }

 // Version --
@@ -253,9 +252,9 @@ func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
 	return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
 }

-// BlindedBlobsBundle --
-func (b builderBidDeneb) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
-	return b.p.BlindedBlobsBundle, nil
+// BlobKzgCommitments --
+func (b builderBidDeneb) BlobKzgCommitments() ([][]byte, error) {
+	return b.p.BlobKzgCommitments, nil
 }

 type signedBuilderBidDeneb struct {
@@ -19,8 +19,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v4/network"
-	"github.com/prysmaticlabs/prysm/v4/network/authorization"
 	v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -89,7 +87,7 @@ type BuilderClient interface {
 	NodeURL() string
 	GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
 	RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
-	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
+	SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
 	Status(ctx context.Context) error
 }
@@ -104,8 +102,7 @@ type Client struct {
 // `host` is the base host + port used to construct request urls. This value can be
 // a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
 func NewClient(host string, opts ...ClientOpt) (*Client, error) {
-	endpoint := covertEndPoint(host)
-	u, err := urlForHost(endpoint.Url)
+	u, err := urlForHost(host)
 	if err != nil {
 		return nil, err
 	}
@@ -121,8 +118,7 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {

 func urlForHost(h string) (*url.URL, error) {
 	// try to parse as url (being permissive)
-	u, err := url.Parse(h)
-	if err == nil && u.Host != "" {
+	if u, err := url.Parse(h); err == nil && u.Host != "" {
 		return u, nil
 	}
 	// try to parse as host:port
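urlForHost keeps its permissive two-step parsing, now with the url.Parse attempt folded into a single if statement. A self-contained sketch of the overall behaviour follows; only the url.Parse branch appears verbatim in the hunk above, so the host:port fallback shown here is an assumption about the rest of the function.

package main

import (
	"fmt"
	"net"
	"net/url"
)

// urlForHost first tries to parse the input as a full URL and, only if that
// yields no host, falls back to treating it as host:port with an assumed
// http scheme (the fallback details are an assumption, see lead-in).
func urlForHost(h string) (*url.URL, error) {
	if u, err := url.Parse(h); err == nil && u.Host != "" {
		return u, nil
	}
	host, port, err := net.SplitHostPort(h)
	if err != nil {
		return nil, fmt.Errorf("%q is neither a URL nor host:port", h)
	}
	return &url.URL{Scheme: "http", Host: net.JoinHostPort(host, port)}, nil
}

func main() {
	for _, h := range []string{"https://relay.example.org", "localhost:3500"} {
		u, err := urlForHost(h)
		fmt.Println(u, err)
	}
}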
@@ -140,7 +136,7 @@ func (c *Client) NodeURL() string {

 type reqOption func(*http.Request)

-// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder/types.go.
+// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
 func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
 	ctx, span := trace.StartSpan(ctx, "builder.client.do")
 	defer func() {
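The corrected comment describes do as the package's single, opinionated request helper. The sketch below illustrates that pattern in a self-contained form (build the URL from a base, apply per-request options, perform the call, read the body); it omits the tracing span and non-200 handling of the real client, and the handler path used in main is only an example.

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
)

type reqOption func(*http.Request)

type client struct {
	hc      *http.Client
	baseURL *url.URL
}

// do builds the request URL from the client's base URL, applies any request
// options, executes the call and returns the raw response body.
func (c *client) do(ctx context.Context, method, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
	u := *c.baseURL
	u.Path = path
	req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
	if err != nil {
		return nil, err
	}
	for _, o := range opts {
		o(req)
	}
	resp, err := c.hc.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"ok":true}`)
	}))
	defer srv.Close()
	base, _ := url.Parse(srv.URL)
	c := &client{hc: srv.Client(), baseURL: base}
	b, err := c.do(context.Background(), http.MethodGet, "/eth/v1/builder/status", nil)
	fmt.Println(string(b), err) // {"ok":true} <nil>
}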
@@ -291,7 +287,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid

 // SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
 // The response is the full execution payload used to create the blinded block.
-func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
 	if !sb.IsBlinded() {
 		return nil, nil, errNotBlinded
 	}
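Call sites drop the blinded blob sidecar argument: the Deneb block itself now carries the KZG commitments. The sketch below mirrors the reduced method set with local stand-in types so it compiles on its own; the real signature uses interfaces.ReadOnlySignedBeaconBlock, interfaces.ExecutionData and *enginev1.BlobsBundle.

package main

import (
	"context"
	"fmt"
)

// Local stand-ins so the sketch is self-contained.
type signedBlindedBlock struct{}
type executionData struct{}
type blobsBundle struct{}

// submitter mirrors the reduced SubmitBlindedBlock signature: no separate
// blob sidecar slice is passed any more.
type submitter interface {
	SubmitBlindedBlock(ctx context.Context, sb signedBlindedBlock) (*executionData, *blobsBundle, error)
}

type fakeClient struct{}

func (fakeClient) SubmitBlindedBlock(_ context.Context, _ signedBlindedBlock) (*executionData, *blobsBundle, error) {
	return &executionData{}, nil, nil // the bundle stays nil for pre-Deneb payloads
}

func main() {
	var c submitter = fakeClient{}
	payload, bundle, err := c.SubmitBlindedBlock(context.Background(), signedBlindedBlock{})
	fmt.Println(payload != nil, bundle == nil, err) // true true <nil>
}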
@@ -375,9 +371,9 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 	if err != nil {
 		return nil, nil, errors.Wrapf(err, "could not get protobuf block")
 	}
-	b, err := shared.SignedBlindedBeaconBlockContentsDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{SignedBlindedBlock: psb, SignedBlindedBlobSidecars: blobs})
+	b, err := shared.SignedBlindedBeaconBlockDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockContentsDeneb to json marshalable type")
+		return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
 	}
 	body, err := json.Marshal(b)
 	if err != nil {
@@ -457,12 +453,3 @@ func non200Err(response *http.Response) error {
 		return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
 	}
 }
-
-func covertEndPoint(ep string) network.Endpoint {
-	return network.Endpoint{
-		Url: ep,
-		Auth: network.AuthorizationData{ // Auth is not used for builder.
-			Method: authorization.None,
-			Value:  "",
-		}}
-}
@@ -12,10 +12,8 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -270,55 +268,12 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
bundle, err := bid.BlindedBlobsBundle()
|
||||
kcgCommitments, err := bid.BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(bundle.BlobRoots) <= fieldparams.MaxBlobsPerBlock && len(bundle.BlobRoots) > 0, true)
|
||||
for i := range bundle.BlobRoots {
|
||||
require.Equal(t, len(bundle.BlobRoots[i]) == fieldparams.RootLength, true)
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
}
|
||||
require.Equal(t, len(bundle.KzgCommitments) > 0, true)
|
||||
for i := range bundle.KzgCommitments {
|
||||
require.Equal(t, len(bundle.KzgCommitments[i]) == 48, true)
|
||||
}
|
||||
require.Equal(t, len(bundle.Proofs) > 0, true)
|
||||
for i := range bundle.Proofs {
|
||||
require.Equal(t, len(bundle.Proofs[i]) == 48, true)
|
||||
}
|
||||
})
|
||||
t.Run("deneb, no bundle", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDenebNoBundle)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
bundle, err := bid.BlindedBlobsBundle()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, (*v1.BlindedBlobsBundle)(nil), bundle)
|
||||
})
|
||||
|
||||
t.Run("unsupported version", func(t *testing.T) {
|
||||
@@ -362,7 +317,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb, nil)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
|
||||
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
@@ -388,7 +343,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
}
|
||||
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
|
||||
require.NoError(t, err)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbb, nil)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
@@ -399,18 +354,17 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
|
||||
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
|
||||
test := testSignedBlindedBeaconBlockDeneb(t)
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
|
||||
var req shared.SignedBlindedBeaconBlockContentsDeneb
|
||||
var req shared.SignedBlindedBeaconBlockDeneb
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
require.NoError(t, err)
|
||||
block, err := req.SignedBlindedBlock.ToConsensus()
|
||||
block, err := req.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, block, test.SignedBlindedBlock)
|
||||
require.DeepEqual(t, block, test)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
|
||||
@@ -423,10 +377,10 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test.SignedBlindedBlock)
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.SignedBlindedBlobSidecars)
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
@@ -436,9 +390,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
require.Equal(t, hexutil.Encode(blobBundle.Blobs[0]), hexutil.Encode(make([]byte, fieldparams.BlobLength)))
|
||||
require.Equal(t, hexutil.Encode(blobBundle.KzgCommitments[0]), "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
|
||||
require.Equal(t, hexutil.Encode(blobBundle.Proofs[0]), "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
|
||||
})
|
||||
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -457,13 +408,13 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
_, _, err = c.SubmitBlindedBlock(ctx, sbbb, nil)
|
||||
_, _, err = c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.ErrorContains(t, "not a bellatrix payload", err)
|
||||
})
|
||||
t.Run("not blinded", func(t *testing.T) {
|
||||
sbb, err := blocks.NewSignedBeaconBlock(ð.SignedBeaconBlockBellatrix{Block: ð.BeaconBlockBellatrix{Body: ð.BeaconBlockBodyBellatrix{}}})
|
||||
require.NoError(t, err)
|
||||
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb, nil)
|
||||
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
|
||||
require.ErrorIs(t, err, errNotBlinded)
|
||||
})
|
||||
}
|
||||
@@ -753,91 +704,70 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
}
|
||||
}
|
||||
|
||||
func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
|
||||
func testSignedBlindedBeaconBlockDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockDeneb {
|
||||
basebytes, err := shared.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
return ð.SignedBlindedBeaconBlockAndBlobsDeneb{
|
||||
SignedBlindedBlock: ð.SignedBlindedBeaconBlockDeneb{
|
||||
Message: ð.BlindedBeaconBlockDeneb{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyDeneb{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
return ð.SignedBlindedBeaconBlockDeneb{
|
||||
Message: ð.BlindedBeaconBlockDeneb{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyDeneb{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
AttesterSlashings: []*eth.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
AttesterSlashings: []*eth.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Attestations: []*eth.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
@@ -854,68 +784,72 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Attestations: []*eth.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ð.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
SyncCommitteeBits: ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
|
||||
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: basebytes,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlobGasUsed: 1,
|
||||
ExcessBlobGas: 2,
|
||||
},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ð.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
SignedBlindedBlobSidecars: []*eth.SignedBlindedBlobSidecar{
|
||||
{
|
||||
Message: ð.BlindedBlobSidecar{
|
||||
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Index: 0,
|
||||
Slot: 1,
|
||||
BlockParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ProposerIndex: 1,
|
||||
BlobRoot: ezDecode(t, "0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"),
|
||||
KzgCommitment: ezDecode(t, "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"),
|
||||
KzgProof: ezDecode(t, "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"),
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
SyncCommitteeBits: ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
|
||||
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: basebytes,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlobGasUsed: 1,
|
||||
ExcessBlobGas: 2,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
 }

 // SubmitBlindedBlock --
-func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
+func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
 	return nil, nil, nil
 }
@@ -117,7 +117,7 @@ type VersionResponse struct {
 	Version string `json:"version"`
 }

-// ExecHeaderResponse is a JSON representation of the builder API header response for Bellatrix.
+// ExecHeaderResponse is a JSON representation of the builder API header response for Bellatrix.
 type ExecHeaderResponse struct {
 	Version string `json:"version"`
 	Data    struct {
@@ -869,16 +869,16 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
 	if err != nil {
 		return nil, err
 	}
-	var bundle *v1.BlindedBlobsBundle
-	if bb.BlindedBlobsBundle != nil {
-		bundle, err = bb.BlindedBlobsBundle.ToProto()
-		if err != nil {
-			return nil, err
+	kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
+	for i, commit := range bb.BlobKzgCommitments {
+		if len(commit) != fieldparams.BLSPubkeyLength {
+			return nil, fmt.Errorf("commitment length %d is not %d", len(commit), fieldparams.BLSPubkeyLength)
 		}
+		kzgCommitments[i] = bytesutil.SafeCopyBytes(commit)
 	}
 	return &eth.BuilderBidDeneb{
 		Header: header,
-		BlindedBlobsBundle: bundle,
+		BlobKzgCommitments: kzgCommitments,
 		Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
 		Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
 	}, nil
@@ -887,42 +887,11 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
 // BuilderBidDeneb is a field of ExecHeaderResponseDeneb.
 type BuilderBidDeneb struct {
 	Header *ExecutionPayloadHeaderDeneb `json:"header"`
-	BlindedBlobsBundle *BlindedBlobsBundle `json:"blinded_blobs_bundle"`
+	BlobKzgCommitments []hexutil.Bytes `json:"blob_kzg_commitments"`
 	Value Uint256 `json:"value"`
 	Pubkey hexutil.Bytes `json:"pubkey"`
 }

-// BlindedBlobsBundle is a field of BuilderBidDeneb and represents the blinded blobs of the associated header.
-type BlindedBlobsBundle struct {
-	KzgCommitments []hexutil.Bytes `json:"commitments"`
-	Proofs []hexutil.Bytes `json:"proofs"`
-	BlobRoots []hexutil.Bytes `json:"blob_roots"`
-}
-
-// ToProto creates a BlindedBlobsBundle Proto from BlindedBlobsBundle.
-func (r *BlindedBlobsBundle) ToProto() (*v1.BlindedBlobsBundle, error) {
-	kzg := make([][]byte, len(r.KzgCommitments))
-	for i := range kzg {
-		kzg[i] = bytesutil.SafeCopyBytes(r.KzgCommitments[i])
-	}
-
-	proofs := make([][]byte, len(r.Proofs))
-	for i := range proofs {
-		proofs[i] = bytesutil.SafeCopyBytes(r.Proofs[i])
-	}
-
-	blobRoots := make([][]byte, len(r.BlobRoots))
-	for i := range blobRoots {
-		blobRoots[i] = bytesutil.SafeCopyBytes(r.BlobRoots[i])
-	}
-
-	return &v1.BlindedBlobsBundle{
-		KzgCommitments: kzg,
-		Proofs: proofs,
-		BlobRoots: blobRoots,
-	}, nil
-}
-
 // ExecutionPayloadHeaderDeneb a field part of the BuilderBidDeneb.
 type ExecutionPayloadHeaderDeneb struct {
 	ParentHash hexutil.Bytes `json:"parent_hash"`
@@ -142,17 +142,9 @@ var testExampleHeaderResponseDeneb = `{
         "blob_gas_used": "1",
         "excess_blob_gas": "2"
       },
-      "blinded_blobs_bundle": {
-        "commitments": [
+      "blob_kzg_commitments": [
         "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
-        ],
-        "proofs": [
-          "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
-        ],
-        "blob_roots": [
-          "0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"
-        ]
-      },
+      ],
       "value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
       "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
     },
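The test fixture above reflects the reshaped bid JSON: blob_kzg_commitments is a flat array of hex-encoded 48-byte commitments on the bid, replacing the nested blinded_blobs_bundle object. A minimal decoding sketch follows; only the fields needed here are modelled, and Value is kept as a plain string rather than the builder package's Uint256.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// Partial model of the reshaped Deneb builder bid JSON; the full struct lives
// in the builder package shown earlier in this diff.
type builderBidDeneb struct {
	BlobKzgCommitments []hexutil.Bytes `json:"blob_kzg_commitments"`
	Value              string          `json:"value"`
	Pubkey             hexutil.Bytes   `json:"pubkey"`
}

func main() {
	raw := []byte(`{
	  "blob_kzg_commitments": [
	    "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
	  ],
	  "value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
	  "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
	}`)
	var bid builderBidDeneb
	if err := json.Unmarshal(raw, &bid); err != nil {
		panic(err)
	}
	fmt.Println(len(bid.BlobKzgCommitments), len(bid.BlobKzgCommitments[0])) // 1 48
}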
api/constants.go (new file, 3 lines)
@@ -0,0 +1,3 @@
+package api
+
+const WebUrlPrefix = "/v2/validator/"
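The new api/constants.go exposes WebUrlPrefix for the validator web UI routes. How the constant is consumed is not shown in this diff, so the routing check below is only an assumed usage sketch.

package main

import (
	"fmt"
	"strings"
)

// WebUrlPrefix mirrors the new constant in the api package
// (github.com/prysmaticlabs/prysm/v4/api); this prefix check is an assumed
// usage, not code from the Prysm tree.
const WebUrlPrefix = "/v2/validator/"

func isWebRequest(path string) bool {
	return strings.HasPrefix(path, WebUrlPrefix)
}

func main() {
	fmt.Println(isWebRequest("/v2/validator/accounts")) // true
	fmt.Println(isWebRequest("/eth/v1/node/health"))    // false
}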
@@ -14,7 +14,6 @@ go_library(
         "//validator:__subpackages__",
     ],
     deps = [
-        "//api/gateway/apimiddleware:go_default_library",
         "//runtime:go_default_library",
         "@com_github_gorilla_mux//:go_default_library",
         "@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
@@ -34,7 +33,6 @@ go_test(
     srcs = ["gateway_test.go"],
     embed = [":go_default_library"],
     deps = [
-        "//api/gateway/apimiddleware:go_default_library",
         "//cmd/beacon-chain/flags:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"api_middleware.go",
|
||||
"log.go",
|
||||
"param_handling.go",
|
||||
"process_field.go",
|
||||
"process_request.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"param_handling_test.go",
|
||||
"process_request_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,265 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// ApiProxyMiddleware is a proxy between an Ethereum consensus API HTTP client and grpc-gateway.
|
||||
// The purpose of the proxy is to handle HTTP requests and gRPC responses in such a way that:
|
||||
// - Ethereum consensus API requests can be handled by grpc-gateway correctly
|
||||
// - gRPC responses can be returned as spec-compliant Ethereum consensus API responses
|
||||
type ApiProxyMiddleware struct {
|
||||
GatewayAddress string
|
||||
EndpointCreator EndpointFactory
|
||||
Timeout time.Duration
|
||||
router *mux.Router
|
||||
}
|
||||
|
||||
// EndpointFactory is responsible for creating new instances of Endpoint values.
|
||||
type EndpointFactory interface {
|
||||
Create(path string) (*Endpoint, error)
|
||||
Paths() []string
|
||||
IsNil() bool
|
||||
}
|
||||
|
||||
// Endpoint is a representation of an API HTTP endpoint that should be proxied by the middleware.
|
||||
type Endpoint struct {
|
||||
Path string // The path of the HTTP endpoint.
|
||||
GetResponse interface{} // The struct corresponding to the JSON structure used in a GET response.
|
||||
PostRequest interface{} // The struct corresponding to the JSON structure used in a POST request.
|
||||
PostResponse interface{} // The struct corresponding to the JSON structure used in a POST response.
|
||||
DeleteRequest interface{} // The struct corresponding to the JSON structure used in a DELETE request.
|
||||
DeleteResponse interface{} // The struct corresponding to the JSON structure used in a DELETE response.
|
||||
RequestURLLiterals []string // Names of URL parameters that should not be base64-encoded.
|
||||
RequestQueryParams []QueryParam // Query parameters of the request.
|
||||
Err ErrorJson // The struct corresponding to the error that should be returned in case of a request failure.
|
||||
Hooks HookCollection // A collection of functions that can be invoked at various stages of the request/response cycle.
|
||||
CustomHandlers []CustomHandler // Functions that will be executed instead of the default request/response behaviour.
|
||||
}
|
||||
|
||||
// RunDefault expresses whether the default processing logic should be carried out after running a pre hook.
|
||||
type RunDefault bool
|
||||
|
||||
// DefaultEndpoint returns an Endpoint with default configuration, e.g. DefaultErrorJson for error handling.
|
||||
func DefaultEndpoint() Endpoint {
|
||||
return Endpoint{
|
||||
Err: &DefaultErrorJson{},
|
||||
}
|
||||
}
|
||||
|
||||
// QueryParam represents a single query parameter's metadata.
|
||||
type QueryParam struct {
|
||||
Name string
|
||||
Hex bool
|
||||
Enum bool
|
||||
}
|
||||
|
||||
// CustomHandler is a function that can be invoked at the very beginning of the request,
|
||||
// essentially replacing the whole default request/response logic with custom logic for a specific endpoint.
|
||||
type CustomHandler = func(m *ApiProxyMiddleware, endpoint Endpoint, w http.ResponseWriter, req *http.Request) (handled bool)
|
||||
|
||||
// HookCollection contains hooks that can be used to amend the default request/response cycle with custom logic for a specific endpoint.
|
||||
type HookCollection struct {
|
||||
OnPreDeserializeRequestBodyIntoContainer func(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) (RunDefault, ErrorJson)
|
||||
OnPostDeserializeRequestBodyIntoContainer func(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) ErrorJson
|
||||
OnPreDeserializeGrpcResponseBodyIntoContainer func([]byte, interface{}) (RunDefault, ErrorJson)
|
||||
OnPreSerializeMiddlewareResponseIntoJson func(interface{}) (RunDefault, []byte, ErrorJson)
|
||||
}
|
||||
|
||||
// fieldProcessor applies the processing function f to a value when the tag is present on the field.
|
||||
type fieldProcessor struct {
|
||||
tag string
|
||||
f func(value reflect.Value) error
|
||||
}
|
||||
|
||||
// Run starts the proxy, registering all proxy endpoints.
|
||||
func (m *ApiProxyMiddleware) Run(gatewayRouter *mux.Router) {
|
||||
for _, path := range m.EndpointCreator.Paths() {
|
||||
gatewayRouter.HandleFunc(path, m.WithMiddleware(path))
|
||||
}
|
||||
m.router = gatewayRouter
|
||||
}
|
||||
|
||||
// ServeHTTP for the proxy middleware.
|
||||
func (m *ApiProxyMiddleware) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
m.router.ServeHTTP(w, req)
|
||||
}
|
||||
|
||||
// WithMiddleware wraps the given endpoint handler with the middleware logic.
|
||||
func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
endpoint, err := m.EndpointCreator.Create(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Could not create endpoint for path: %s", path)
|
||||
return
|
||||
}
|
||||
|
||||
for _, handler := range endpoint.CustomHandlers {
|
||||
if handler(m, *endpoint, w, req) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if req.Method == "POST" {
|
||||
if errJson := handlePostRequestForEndpoint(endpoint, w, req); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if req.Method == "DELETE" && req.Body != http.NoBody {
|
||||
if errJson := handleDeleteRequestForEndpoint(endpoint, req); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if errJson := m.PrepareRequestForProxying(*endpoint, req); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
grpcResp, errJson := m.ProxyRequest(req)
|
||||
if errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
grpcRespBody, errJson := ReadGrpcResponseBody(grpcResp.Body)
|
||||
if errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
|
||||
var respJson []byte
|
||||
if !GrpcResponseIsEmpty(grpcRespBody) {
|
||||
respHasError, errJson := HandleGrpcResponseError(endpoint.Err, grpcResp, grpcRespBody, w)
|
||||
if errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
if respHasError {
|
||||
return
|
||||
}
|
||||
|
||||
var resp interface{}
|
||||
if req.Method == "GET" {
|
||||
resp = endpoint.GetResponse
|
||||
} else if req.Method == "DELETE" {
|
||||
resp = endpoint.DeleteResponse
|
||||
} else {
|
||||
resp = endpoint.PostResponse
|
||||
}
|
||||
if errJson := deserializeGrpcResponseBodyIntoContainerWrapped(endpoint, grpcRespBody, resp); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
if errJson := ProcessMiddlewareResponseFields(resp); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
|
||||
respJson, errJson = serializeMiddlewareResponseIntoJsonWrapped(endpoint, respJson, resp)
|
||||
if errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if errJson := WriteMiddlewareResponseHeadersAndBody(grpcResp, respJson, w); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
if errJson := Cleanup(grpcResp.Body); errJson != nil {
|
||||
WriteError(w, errJson, nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handlePostRequestForEndpoint(endpoint *Endpoint, w http.ResponseWriter, req *http.Request) ErrorJson {
|
||||
if errJson := deserializeRequestBodyIntoContainerWrapped(endpoint, req, w); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
return SetRequestBodyToRequestContainer(endpoint.PostRequest, req)
|
||||
}
|
||||
|
||||
func handleDeleteRequestForEndpoint(endpoint *Endpoint, req *http.Request) ErrorJson {
|
||||
if errJson := DeserializeRequestBodyIntoContainer(req.Body, endpoint.DeleteRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := ProcessRequestContainerFields(endpoint.DeleteRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
return SetRequestBodyToRequestContainer(endpoint.DeleteRequest, req)
|
||||
}
|
||||
|
||||
func deserializeRequestBodyIntoContainerWrapped(endpoint *Endpoint, req *http.Request, w http.ResponseWriter) ErrorJson {
|
||||
runDefault := true
|
||||
if endpoint.Hooks.OnPreDeserializeRequestBodyIntoContainer != nil {
|
||||
run, errJson := endpoint.Hooks.OnPreDeserializeRequestBodyIntoContainer(endpoint, w, req)
|
||||
if errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if !run {
|
||||
runDefault = false
|
||||
}
|
||||
}
|
||||
if runDefault {
|
||||
if errJson := DeserializeRequestBodyIntoContainer(req.Body, endpoint.PostRequest); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
}
|
||||
if endpoint.Hooks.OnPostDeserializeRequestBodyIntoContainer != nil {
|
||||
if errJson := endpoint.Hooks.OnPostDeserializeRequestBodyIntoContainer(endpoint, w, req); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deserializeGrpcResponseBodyIntoContainerWrapped(endpoint *Endpoint, grpcResponseBody []byte, resp interface{}) ErrorJson {
|
||||
runDefault := true
|
||||
if endpoint.Hooks.OnPreDeserializeGrpcResponseBodyIntoContainer != nil {
|
||||
run, errJson := endpoint.Hooks.OnPreDeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, resp)
|
||||
if errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if !run {
|
||||
runDefault = false
|
||||
}
|
||||
}
|
||||
if runDefault {
|
||||
if errJson := DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, resp); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func serializeMiddlewareResponseIntoJsonWrapped(endpoint *Endpoint, respJson []byte, resp interface{}) ([]byte, ErrorJson) {
|
||||
runDefault := true
|
||||
var errJson ErrorJson
|
||||
if endpoint.Hooks.OnPreSerializeMiddlewareResponseIntoJson != nil {
|
||||
var run RunDefault
|
||||
run, respJson, errJson = endpoint.Hooks.OnPreSerializeMiddlewareResponseIntoJson(resp)
|
||||
if errJson != nil {
|
||||
return nil, errJson
|
||||
}
|
||||
if !run {
|
||||
runDefault = false
|
||||
}
|
||||
}
|
||||
if runDefault {
|
||||
respJson, errJson = SerializeMiddlewareResponseIntoJson(resp)
|
||||
if errJson != nil {
|
||||
return nil, errJson
|
||||
}
|
||||
}
|
||||
return respJson, nil
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "apimiddleware")
|
||||
@@ -1,103 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
butil "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/wealdtech/go-bytesutil"
|
||||
)
|
||||
|
||||
// HandleURLParameters processes URL parameters, allowing parameterized URLs to be safely and correctly proxied to grpc-gateway.
|
||||
func HandleURLParameters(url string, req *http.Request, literals []string) ErrorJson {
|
||||
segments := strings.Split(url, "/")
|
||||
|
||||
segmentsLoop:
|
||||
for i, s := range segments {
|
||||
// We only care about segments which are parameterized.
|
||||
if isRequestParam(s) {
|
||||
// Don't do anything with parameters which should be forwarded literally to gRPC.
|
||||
for _, l := range literals {
|
||||
if s == "{"+l+"}" {
|
||||
continue segmentsLoop
|
||||
}
|
||||
}
|
||||
|
||||
routeVar := mux.Vars(req)[s[1:len(s)-1]]
|
||||
bRouteVar := []byte(routeVar)
|
||||
if butil.IsHex(bRouteVar) {
|
||||
var err error
|
||||
bRouteVar, err = bytesutil.FromHexString(string(bRouteVar))
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not process URL parameter")
|
||||
}
|
||||
}
|
||||
// Converting hex to base64 may result in a value which malforms the URL.
|
||||
// We use URLEncoding to safely escape such values.
|
||||
base64RouteVar := base64.URLEncoding.EncodeToString(bRouteVar)
|
||||
|
||||
// Merge segments back into the full URL.
|
||||
splitPath := strings.Split(req.URL.Path, "/")
|
||||
splitPath[i] = base64RouteVar
|
||||
req.URL.Path = strings.Join(splitPath, "/")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleQueryParameters processes query parameters, allowing them to be safely and correctly proxied to grpc-gateway.
|
||||
func HandleQueryParameters(req *http.Request, params []QueryParam) ErrorJson {
|
||||
queryParams := req.URL.Query()
|
||||
|
||||
normalizeQueryValues(queryParams)
|
||||
|
||||
for key, vals := range queryParams {
|
||||
for _, p := range params {
|
||||
if key == p.Name {
|
||||
if p.Hex {
|
||||
queryParams.Del(key)
|
||||
for _, v := range vals {
|
||||
b := []byte(v)
|
||||
if butil.IsHex(b) {
|
||||
var err error
|
||||
b, err = bytesutil.FromHexString(v)
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not process query parameter")
|
||||
}
|
||||
}
|
||||
queryParams.Add(key, base64.URLEncoding.EncodeToString(b))
|
||||
}
|
||||
}
|
||||
if p.Enum {
|
||||
queryParams.Del(key)
|
||||
for _, v := range vals {
|
||||
// gRPC expects uppercase enum values.
|
||||
queryParams.Add(key, strings.ToUpper(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
req.URL.RawQuery = queryParams.Encode()
|
||||
return nil
|
||||
}
|
||||
|
||||
// isRequestParam verifies whether the passed string is a request parameter.
// Request parameters are enclosed in { and }.
func isRequestParam(s string) bool {
return len(s) > 2 && s[0] == '{' && s[len(s)-1] == '}'
}

func normalizeQueryValues(queryParams url.Values) {
// Replace comma-separated values with individual values.
for key, vals := range queryParams {
splitVals := make([]string, 0)
for _, v := range vals {
splitVals = append(splitVals, strings.Split(v, ",")...)
}
queryParams[key] = splitVals
}
}
@@ -1,124 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestHandleURLParameters(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
|
||||
t.Run("no_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example/bar", &body)
|
||||
|
||||
errJson := HandleURLParameters("/not_param", request, []string{})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/bar", request.URL.Path)
|
||||
})
|
||||
|
||||
t.Run("with_params", func(t *testing.T) {
|
||||
muxVars := make(map[string]string)
|
||||
muxVars["bar_param"] = "bar"
|
||||
muxVars["quux_param"] = "quux"
|
||||
request := httptest.NewRequest("GET", "http://foo.example/bar/baz/quux", &body)
|
||||
request = mux.SetURLVars(request, muxVars)
|
||||
|
||||
errJson := HandleURLParameters("/{bar_param}/not_param/{quux_param}", request, []string{})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/YmFy/baz/cXV1eA==", request.URL.Path)
|
||||
})
|
||||
|
||||
t.Run("with_literal", func(t *testing.T) {
|
||||
muxVars := make(map[string]string)
|
||||
muxVars["bar_param"] = "bar"
|
||||
request := httptest.NewRequest("GET", "http://foo.example/bar/baz", &body)
|
||||
request = mux.SetURLVars(request, muxVars)
|
||||
|
||||
errJson := HandleURLParameters("/{bar_param}/not_param/", request, []string{"bar_param"})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/bar/baz", request.URL.Path)
|
||||
})
|
||||
|
||||
t.Run("with_hex", func(t *testing.T) {
|
||||
muxVars := make(map[string]string)
|
||||
muxVars["hex_param"] = "0x626172"
|
||||
request := httptest.NewRequest("GET", "http://foo.example/0x626172/baz", &body)
|
||||
request = mux.SetURLVars(request, muxVars)
|
||||
|
||||
errJson := HandleURLParameters("/{hex_param}/not_param/", request, []string{})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/YmFy/baz", request.URL.Path)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHandleQueryParameters(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
|
||||
t.Run("regular_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example?bar=bar&baz=baz", &body)
|
||||
|
||||
errJson := HandleQueryParameters(request, []QueryParam{{Name: "bar"}, {Name: "baz"}})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
query := request.URL.Query()
|
||||
v, ok := query["bar"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "bar", v[0])
|
||||
v, ok = query["baz"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "baz", v[0])
|
||||
})
|
||||
|
||||
t.Run("hex_and_enum_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example?hex=0x626172&baz=baz", &body)
|
||||
|
||||
errJson := HandleQueryParameters(request, []QueryParam{{Name: "hex", Hex: true}, {Name: "baz", Enum: true}})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
query := request.URL.Query()
|
||||
v, ok := query["hex"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "YmFy", v[0])
|
||||
v, ok = query["baz"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "BAZ", v[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsRequestParam(t *testing.T) {
|
||||
tests := []struct {
|
||||
s string
|
||||
b bool
|
||||
}{
|
||||
{"", false},
|
||||
{"{", false},
|
||||
{"}", false},
|
||||
{"{}", false},
|
||||
{"{x}", true},
|
||||
{"{very_long_parameter_name_with_underscores}", true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := isRequestParam(tt.s)
|
||||
assert.Equal(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeQueryValues(t *testing.T) {
|
||||
input := make(map[string][]string)
|
||||
input["key"] = []string{"value1", "value2,value3,value4", "value5"}
|
||||
|
||||
normalizeQueryValues(input)
|
||||
require.Equal(t, 5, len(input["key"]))
|
||||
assert.Equal(t, "value1", input["key"][0])
|
||||
assert.Equal(t, "value2", input["key"][1])
|
||||
assert.Equal(t, "value3", input["key"][2])
|
||||
assert.Equal(t, "value4", input["key"][3])
|
||||
assert.Equal(t, "value5", input["key"][4])
|
||||
}
|
||||
@@ -1,179 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/wealdtech/go-bytesutil"
|
||||
)
|
||||
|
||||
// processField calls each processor function on any field that has the matching tag set.
|
||||
// It is a recursive function.
|
||||
func processField(s interface{}, processors []fieldProcessor) error {
|
||||
kind := reflect.TypeOf(s).Kind()
|
||||
if kind != reflect.Ptr && kind != reflect.Slice && kind != reflect.Array {
|
||||
return fmt.Errorf("processing fields of kind '%v' is unsupported", kind)
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(s).Elem()
|
||||
v := reflect.Indirect(reflect.ValueOf(s))
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
switch v.Field(i).Kind() {
|
||||
case reflect.Slice:
|
||||
sliceElem := t.Field(i).Type.Elem()
|
||||
kind := sliceElem.Kind()
|
||||
// Recursively process slices to struct pointers.
|
||||
switch {
|
||||
case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
// Process each string in string slices.
|
||||
case kind == reflect.String:
|
||||
for _, proc := range processors {
|
||||
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
|
||||
if !hasTag {
|
||||
continue
|
||||
}
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := proc.f(v.Field(i).Index(j)); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Recursively process struct pointers.
|
||||
case reflect.Ptr:
|
||||
if v.Field(i).Elem().Kind() == reflect.Struct {
|
||||
if err := processField(v.Field(i).Interface(), processors); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
default:
|
||||
field := t.Field(i)
|
||||
for _, proc := range processors {
|
||||
if _, hasTag := field.Tag.Lookup(proc.tag); hasTag {
|
||||
if err := proc.f(v.Field(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hexToBase64Processor(v reflect.Value) error {
|
||||
if v.String() == "0x" {
|
||||
v.SetString("")
|
||||
return nil
|
||||
}
|
||||
b, err := bytesutil.FromHexString(v.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.SetString(base64.StdEncoding.EncodeToString(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
func base64ToHexProcessor(v reflect.Value) error {
|
||||
if v.String() == "" {
|
||||
// Empty hex values are represented as "0x".
|
||||
v.SetString("0x")
|
||||
return nil
|
||||
}
|
||||
b, err := base64.StdEncoding.DecodeString(v.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.SetString(hexutil.Encode(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
func base64ToChecksumAddressProcessor(v reflect.Value) error {
|
||||
if v.String() == "" {
|
||||
// Empty hex values are represented as "0x".
|
||||
v.SetString("0x")
|
||||
return nil
|
||||
}
|
||||
b, err := base64.StdEncoding.DecodeString(v.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.SetString(common.BytesToAddress(b).Hex())
|
||||
return nil
|
||||
}
|
||||
|
||||
func base64ToUint256Processor(v reflect.Value) error {
|
||||
if v.String() == "" {
|
||||
return nil
|
||||
}
|
||||
littleEndian, err := base64.StdEncoding.DecodeString(v.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(littleEndian) != 32 {
|
||||
return errors.New("invalid length for Uint256")
|
||||
}
|
||||
|
||||
// Integers are stored as little-endian, but
|
||||
// big.Int expects big-endian. So we need to reverse
|
||||
// the byte order before decoding.
|
||||
var bigEndian [32]byte
|
||||
for i := 0; i < len(littleEndian); i++ {
|
||||
bigEndian[i] = littleEndian[len(littleEndian)-1-i]
|
||||
}
|
||||
var uint256 big.Int
|
||||
uint256.SetBytes(bigEndian[:])
|
||||
v.SetString(uint256.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func uint256ToBase64Processor(v reflect.Value) error {
|
||||
if v.String() == "" {
|
||||
return nil
|
||||
}
|
||||
uint256, ok := new(big.Int).SetString(v.String(), 10)
|
||||
if !ok {
|
||||
return fmt.Errorf("could not parse Uint256")
|
||||
}
|
||||
bigEndian := uint256.Bytes()
|
||||
if len(bigEndian) > 32 {
|
||||
return fmt.Errorf("number too big for Uint256")
|
||||
}
|
||||
|
||||
// Integers are stored as little-endian, but
|
||||
// big.Int gives big-endian. So we need to reverse
|
||||
// the byte order before encoding.
|
||||
var littleEndian [32]byte
|
||||
for i := 0; i < len(bigEndian); i++ {
|
||||
littleEndian[i] = bigEndian[len(bigEndian)-1-i]
|
||||
}
|
||||
v.SetString(base64.StdEncoding.EncodeToString(littleEndian[:]))
|
||||
return nil
|
||||
}
|
||||
|
||||
func enumToLowercaseProcessor(v reflect.Value) error {
|
||||
v.SetString(strings.ToLower(v.String()))
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeToUnixProcessor(v reflect.Value) error {
|
||||
t, err := time.Parse(time.RFC3339, v.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.SetString(strconv.FormatUint(uint64(t.Unix()), 10))
|
||||
return nil
|
||||
}
|
||||
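For reference, a minimal sketch of how the tag-driven processors above were typically wired together; the container type and helper name below are hypothetical illustrations, not part of the original package.

// exampleHexToBase64 is an illustrative helper (hypothetical name): processField
// walks the struct via reflection and rewrites every `hex`-tagged field in place,
// so "0x666f6f" ("foo") becomes its base64 form "Zm9v".
type exampleContainer struct {
	BlockRoot string `json:"block_root" hex:"true"`
}

func exampleHexToBase64() error {
	c := &exampleContainer{BlockRoot: "0x666f6f"}
	return processField(c, []fieldProcessor{{tag: "hex", f: hexToBase64Processor}})
}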
@@ -1,283 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
)
|
||||
|
||||
// DeserializeRequestBodyIntoContainer deserializes the request's body into an endpoint-specific struct.
|
||||
func DeserializeRequestBodyIntoContainer(body io.Reader, requestContainer interface{}) ErrorJson {
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.DisallowUnknownFields()
|
||||
if err := decoder.Decode(&requestContainer); err != nil {
|
||||
if strings.Contains(err.Error(), "json: unknown field") {
|
||||
e := errors.Wrap(err, "could not decode request body")
|
||||
return &DefaultErrorJson{
|
||||
Message: e.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
return InternalServerErrorWithMessage(err, "could not decode request body")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessRequestContainerFields processes fields of an endpoint-specific container according to field tags.
|
||||
func ProcessRequestContainerFields(requestContainer interface{}) ErrorJson {
|
||||
if err := processField(requestContainer, []fieldProcessor{
|
||||
{
|
||||
tag: "hex",
|
||||
f: hexToBase64Processor,
|
||||
},
|
||||
{
|
||||
tag: "uint256",
|
||||
f: uint256ToBase64Processor,
|
||||
},
|
||||
}); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not process request data")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRequestBodyToRequestContainer makes the endpoint-specific container the new body of the request.
|
||||
func SetRequestBodyToRequestContainer(requestContainer interface{}, req *http.Request) ErrorJson {
|
||||
// Serialize the struct, which now includes a base64-encoded value, into JSON.
|
||||
j, err := json.Marshal(requestContainer)
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not marshal request")
|
||||
}
|
||||
// Set the body to the new JSON.
|
||||
req.Body = io.NopCloser(bytes.NewReader(j))
|
||||
req.Header.Set("Content-Length", strconv.Itoa(len(j)))
|
||||
req.ContentLength = int64(len(j))
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrepareRequestForProxying applies additional logic to the request so that it can be correctly proxied to grpc-gateway.
|
||||
func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *http.Request) ErrorJson {
|
||||
req.URL.Scheme = "http"
|
||||
req.URL.Host = m.GatewayAddress
|
||||
req.RequestURI = ""
|
||||
if errJson := HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
// We have to add the prefix after handling parameters because adding the prefix changes URL segment indexing.
|
||||
req.URL.Path = "/internal" + req.URL.Path
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProxyRequest proxies the request to grpc-gateway.
|
||||
func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
|
||||
// We do not use http.DefaultClient because it does not have any timeout.
|
||||
netClient := &http.Client{Timeout: m.Timeout}
|
||||
grpcResp, err := netClient.Do(req)
|
||||
if err != nil {
|
||||
if err, ok := err.(net.Error); ok && err.Timeout() {
|
||||
return nil, TimeoutError()
|
||||
}
|
||||
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
|
||||
}
|
||||
if grpcResp == nil {
|
||||
return nil, &DefaultErrorJson{Message: "nil response from gRPC-gateway", Code: http.StatusInternalServerError}
|
||||
}
|
||||
return grpcResp, nil
|
||||
}
|
||||
|
||||
// ReadGrpcResponseBody reads the body from the grpc-gateway's response.
|
||||
func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
|
||||
body, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, InternalServerErrorWithMessage(err, "could not read response body")
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
|
||||
// Whether there was an error is indicated by the bool return value. In case of an error,
|
||||
// there is no need to write to the response because it's taken care of by the function.
|
||||
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []byte, w http.ResponseWriter) (bool, ErrorJson) {
|
||||
responseHasError := false
|
||||
if err := json.Unmarshal(respBody, errJson); err != nil {
|
||||
return false, InternalServerErrorWithMessage(err, "could not unmarshal error")
|
||||
}
|
||||
if errJson.Msg() != "" {
|
||||
responseHasError = true
|
||||
// Something went wrong, but the request completed, meaning we can write headers and the error message.
|
||||
for h, vs := range resp.Header {
|
||||
for _, v := range vs {
|
||||
if strings.HasSuffix(h, api.VersionHeader) {
|
||||
w.Header().Set(api.VersionHeader, v)
|
||||
} else {
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Handle gRPC timeout.
|
||||
if resp.StatusCode == http.StatusGatewayTimeout {
|
||||
WriteError(w, TimeoutError(), resp.Header)
|
||||
} else {
|
||||
// Set code to HTTP code because unmarshalled body contained gRPC code.
|
||||
errJson.SetCode(resp.StatusCode)
|
||||
WriteError(w, errJson, resp.Header)
|
||||
}
|
||||
}
|
||||
return responseHasError, nil
|
||||
}
|
||||
|
||||
// GrpcResponseIsEmpty determines whether the grpc-gateway's response body contains no data.
|
||||
func GrpcResponseIsEmpty(grpcResponseBody []byte) bool {
|
||||
return len(grpcResponseBody) == 0 || string(grpcResponseBody) == "{}"
|
||||
}
|
||||
|
||||
// DeserializeGrpcResponseBodyIntoContainer deserializes the grpc-gateway's response body into an endpoint-specific struct.
|
||||
func DeserializeGrpcResponseBodyIntoContainer(body []byte, responseContainer interface{}) ErrorJson {
|
||||
if err := json.Unmarshal(body, &responseContainer); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not unmarshal response")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessMiddlewareResponseFields processes fields of an endpoint-specific container according to field tags.
|
||||
func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
|
||||
if err := processField(responseContainer, []fieldProcessor{
|
||||
{
|
||||
tag: "hex",
|
||||
f: base64ToHexProcessor,
|
||||
},
|
||||
{
|
||||
tag: "address",
|
||||
f: base64ToChecksumAddressProcessor,
|
||||
},
|
||||
{
|
||||
tag: "enum",
|
||||
f: enumToLowercaseProcessor,
|
||||
},
|
||||
{
|
||||
tag: "time",
|
||||
f: timeToUnixProcessor,
|
||||
},
|
||||
{
|
||||
tag: "uint256",
|
||||
f: base64ToUint256Processor,
|
||||
},
|
||||
}); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not process response data")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SerializeMiddlewareResponseIntoJson serializes the endpoint-specific response struct into a JSON representation.
|
||||
func SerializeMiddlewareResponseIntoJson(responseContainer interface{}) (jsonResponse []byte, errJson ErrorJson) {
|
||||
j, err := json.Marshal(responseContainer)
|
||||
if err != nil {
|
||||
return nil, InternalServerErrorWithMessage(err, "could not marshal response")
|
||||
}
|
||||
return j, nil
|
||||
}
|
||||
|
||||
// WriteMiddlewareResponseHeadersAndBody populates headers and the body of the final response.
|
||||
func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson []byte, w http.ResponseWriter) ErrorJson {
|
||||
var statusCodeHeader string
|
||||
for h, vs := range grpcResp.Header {
|
||||
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
|
||||
if strings.HasPrefix(h, grpc.MetadataPrefix) {
|
||||
if h == grpc.WithPrefix(grpc.HttpCodeMetadataKey) {
|
||||
statusCodeHeader = vs[0]
|
||||
} else if strings.HasSuffix(h, api.VersionHeader) {
|
||||
w.Header().Set(api.VersionHeader, vs[0])
|
||||
}
|
||||
} else {
|
||||
for _, v := range vs {
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !GrpcResponseIsEmpty(responseJson) {
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(responseJson)))
|
||||
if statusCodeHeader != "" {
|
||||
code, err := strconv.Atoi(statusCodeHeader)
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not parse status code")
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
} else {
|
||||
w.WriteHeader(grpcResp.StatusCode)
|
||||
}
|
||||
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(responseJson))); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not write response message")
|
||||
}
|
||||
} else {
|
||||
w.Header().Set("Content-Length", "0")
|
||||
w.WriteHeader(grpcResp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteError writes the error by manipulating headers and the body of the final response.
|
||||
func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.Header) {
|
||||
// Include custom error in the error JSON.
|
||||
hasCustomError := false
|
||||
if responseHeader != nil {
|
||||
customError, ok := responseHeader[grpc.WithPrefix(grpc.CustomErrorMetadataKey)]
|
||||
if ok {
|
||||
hasCustomError = true
|
||||
// Assume header has only one value and read the 0 index.
|
||||
if err := json.Unmarshal([]byte(customError[0]), errJson); err != nil {
|
||||
log.WithError(err).Error("Could not unmarshal custom error message")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var j []byte
|
||||
if hasCustomError {
|
||||
var err error
|
||||
j, err = json.Marshal(errJson)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not marshal error message")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
// We marshal the response body into a DefaultErrorJson if the custom error is not present.
// This is because the ErrorJson argument is the endpoint's error definition, which may contain custom fields.
// In such a scenario marshaling the endpoint's error would populate the resulting JSON
// with these fields even if they are not present in the gRPC header.
|
||||
d := &DefaultErrorJson{
|
||||
Message: errJson.Msg(),
|
||||
Code: errJson.StatusCode(),
|
||||
}
|
||||
j, err = json.Marshal(d)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not marshal error message")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(j)))
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(errJson.StatusCode())
|
||||
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(j))); err != nil {
|
||||
log.WithError(err).Error("Could not write error message")
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup performs final cleanup on the initial response from grpc-gateway.
|
||||
func Cleanup(grpcResponseBody io.ReadCloser) ErrorJson {
|
||||
if err := grpcResponseBody.Close(); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not close response body")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
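For orientation, a minimal sketch of the order in which the helpers above were typically chained when proxying a single request; the function name proxyOnce is hypothetical, and per-step error handling is condensed compared to the real handler.

func proxyOnce(m *ApiProxyMiddleware, endpoint Endpoint, resp interface{}, w http.ResponseWriter, req *http.Request) ErrorJson {
	// Rewrite URL and query parameters so grpc-gateway can understand them, then forward.
	if errJson := m.PrepareRequestForProxying(endpoint, req); errJson != nil {
		return errJson
	}
	grpcResp, errJson := m.ProxyRequest(req)
	if errJson != nil {
		return errJson
	}
	body, errJson := ReadGrpcResponseBody(grpcResp.Body)
	if errJson != nil {
		return errJson
	}
	// Translate the gateway's JSON into the endpoint container, post-process
	// tagged fields, and write the final response back to the client.
	if errJson := DeserializeGrpcResponseBodyIntoContainer(body, resp); errJson != nil {
		return errJson
	}
	if errJson := ProcessMiddlewareResponseFields(resp); errJson != nil {
		return errJson
	}
	respJson, errJson := SerializeMiddlewareResponseIntoJson(resp)
	if errJson != nil {
		return errJson
	}
	if errJson := WriteMiddlewareResponseHeadersAndBody(grpcResp, respJson, w); errJson != nil {
		return errJson
	}
	return Cleanup(grpcResp.Body)
}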
@@ -1,435 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
type testRequestContainer struct {
|
||||
TestString string
|
||||
TestHexString string `hex:"true"`
|
||||
TestEmptyHexString string `hex:"true"`
|
||||
TestUint256String string `uint256:"true"`
|
||||
}
|
||||
|
||||
func defaultRequestContainer() *testRequestContainer {
|
||||
return &testRequestContainer{
|
||||
TestString: "test string",
|
||||
TestHexString: "0x666F6F", // hex encoding of "foo"
|
||||
TestEmptyHexString: "0x",
|
||||
TestUint256String: "4196",
|
||||
}
|
||||
}
|
||||
|
||||
type testResponseContainer struct {
|
||||
TestString string
|
||||
TestHex string `hex:"true"`
|
||||
TestEmptyHex string `hex:"true"`
|
||||
TestAddress string `address:"true"`
|
||||
TestEmptyAddress string `address:"true"`
|
||||
TestUint256 string `uint256:"true"`
|
||||
TestEnum string `enum:"true"`
|
||||
TestTime string `time:"true"`
|
||||
}
|
||||
|
||||
func defaultResponseContainer() *testResponseContainer {
|
||||
return &testResponseContainer{
|
||||
TestString: "test string",
|
||||
TestHex: "Zm9v", // base64 encoding of "foo"
|
||||
TestEmptyHex: "",
|
||||
TestAddress: "Zm9v",
|
||||
TestEmptyAddress: "",
|
||||
TestEnum: "Test Enum",
|
||||
TestTime: "2006-01-02T15:04:05Z",
|
||||
|
||||
// base64 encoding of 4196 in little-endian
|
||||
TestUint256: "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
|
||||
}
|
||||
}
|
||||
|
||||
type testErrorJson struct {
|
||||
Message string
|
||||
Code int
|
||||
CustomField string
|
||||
}
|
||||
|
||||
// StatusCode returns the error's underlying error code.
|
||||
func (e *testErrorJson) StatusCode() int {
|
||||
return e.Code
|
||||
}
|
||||
|
||||
// Msg returns the error's underlying message.
|
||||
func (e *testErrorJson) Msg() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// SetCode sets the error's underlying error code.
|
||||
func (e *testErrorJson) SetCode(code int) {
|
||||
e.Code = code
|
||||
}
|
||||
|
||||
// SetMsg sets the error's underlying message.
|
||||
func (e *testErrorJson) SetMsg(msg string) {
|
||||
e.Message = msg
|
||||
}
|
||||
|
||||
func TestDeserializeRequestBodyIntoContainer(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
err := json.NewEncoder(&bodyJson).Encode(defaultRequestContainer())
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &testRequestContainer{}
|
||||
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "test string", container.TestString)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
bodyJson.Write([]byte("foo"))
|
||||
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, &testRequestContainer{})
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode request body"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("unknown field", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
bodyJson.Write([]byte("{\"foo\":\"foo\"}"))
|
||||
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, &testRequestContainer{})
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode request body"))
|
||||
assert.Equal(t, http.StatusBadRequest, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessRequestContainerFields(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
container := defaultRequestContainer()
|
||||
|
||||
errJson := ProcessRequestContainerFields(container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "Zm9v", container.TestHexString)
|
||||
assert.Equal(t, "", container.TestEmptyHexString)
|
||||
assert.Equal(t, "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", container.TestUint256String)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
errJson := ProcessRequestContainerFields("foo")
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not process request data"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetRequestBodyToRequestContainer(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
request := httptest.NewRequest("GET", "http://foo.example", &body)
|
||||
|
||||
errJson := SetRequestBodyToRequestContainer(defaultRequestContainer(), request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
container := &testRequestContainer{}
|
||||
require.NoError(t, json.NewDecoder(request.Body).Decode(container))
|
||||
assert.Equal(t, "test string", container.TestString)
|
||||
contentLengthHeader, ok := request.Header["Content-Length"]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, 1, len(contentLengthHeader), "wrong number of header values")
|
||||
assert.Equal(t, "108", contentLengthHeader[0])
|
||||
assert.Equal(t, int64(108), request.ContentLength)
|
||||
}
|
||||
|
||||
func TestPrepareRequestForProxying(t *testing.T) {
|
||||
middleware := &ApiProxyMiddleware{
|
||||
GatewayAddress: "http://gateway.example",
|
||||
}
|
||||
// We will set some params to make the request more interesting.
|
||||
endpoint := Endpoint{
|
||||
Path: "/{url_param}",
|
||||
RequestURLLiterals: []string{"url_param"},
|
||||
RequestQueryParams: []QueryParam{{Name: "query_param"}},
|
||||
}
|
||||
var body bytes.Buffer
|
||||
request := httptest.NewRequest("GET", "http://foo.example?query_param=bar", &body)
|
||||
|
||||
errJson := middleware.PrepareRequestForProxying(endpoint, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "http", request.URL.Scheme)
|
||||
assert.Equal(t, middleware.GatewayAddress, request.URL.Host)
|
||||
assert.Equal(t, "", request.RequestURI)
|
||||
}
|
||||
|
||||
func TestReadGrpcResponseBody(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
b.Write([]byte("foo"))
|
||||
|
||||
body, jsonErr := ReadGrpcResponseBody(&b)
|
||||
require.Equal(t, true, jsonErr == nil)
|
||||
assert.Equal(t, "foo", string(body))
|
||||
}
|
||||
|
||||
func TestHandleGrpcResponseError(t *testing.T) {
|
||||
response := &http.Response{
|
||||
StatusCode: 400,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
"Bar": []string{"bar"},
|
||||
},
|
||||
}
|
||||
writer := httptest.NewRecorder()
|
||||
errJson := &testErrorJson{
|
||||
Message: "foo",
|
||||
Code: 400,
|
||||
}
|
||||
b, err := json.Marshal(errJson)
|
||||
require.NoError(t, err)
|
||||
|
||||
hasError, e := HandleGrpcResponseError(errJson, response, b, writer)
|
||||
require.Equal(t, true, e == nil)
|
||||
assert.Equal(t, true, hasError)
|
||||
v, ok := writer.Header()["Foo"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "foo", v[0])
|
||||
v, ok = writer.Header()["Bar"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "bar", v[0])
|
||||
assert.Equal(t, 400, errJson.StatusCode())
|
||||
}
|
||||
|
||||
func TestGrpcResponseIsEmpty(t *testing.T) {
|
||||
t.Run("nil", func(t *testing.T) {
|
||||
assert.Equal(t, true, GrpcResponseIsEmpty(nil))
|
||||
})
|
||||
t.Run("empty_slice", func(t *testing.T) {
|
||||
assert.Equal(t, true, GrpcResponseIsEmpty(make([]byte, 0)))
|
||||
})
|
||||
t.Run("empty_brackets", func(t *testing.T) {
|
||||
assert.Equal(t, true, GrpcResponseIsEmpty([]byte("{}")))
|
||||
})
|
||||
t.Run("non_empty", func(t *testing.T) {
|
||||
assert.Equal(t, false, GrpcResponseIsEmpty([]byte("{\"foo\":\"bar\"}")))
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeserializeGrpcResponseBodyIntoContainer(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
body, err := json.Marshal(defaultRequestContainer())
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &testRequestContainer{}
|
||||
errJson := DeserializeGrpcResponseBodyIntoContainer(body, container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "test string", container.TestString)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
bodyJson.Write([]byte("foo"))
|
||||
errJson := DeserializeGrpcResponseBodyIntoContainer(bodyJson.Bytes(), &testRequestContainer{})
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not unmarshal response"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessMiddlewareResponseFields(t *testing.T) {
|
||||
t.Run("Ok", func(t *testing.T) {
|
||||
container := defaultResponseContainer()
|
||||
|
||||
errJson := ProcessMiddlewareResponseFields(container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "0x666f6f", container.TestHex)
|
||||
assert.Equal(t, "0x", container.TestEmptyHex)
|
||||
assert.Equal(t, "0x0000000000000000000000000000000000666F6f", container.TestAddress)
|
||||
assert.Equal(t, "0x", container.TestEmptyAddress)
|
||||
assert.Equal(t, "4196", container.TestUint256)
|
||||
assert.Equal(t, "test enum", container.TestEnum)
|
||||
assert.Equal(t, "1136214245", container.TestTime)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
errJson := ProcessMiddlewareResponseFields("foo")
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not process response data"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeMiddlewareResponseIntoJson(t *testing.T) {
|
||||
container := defaultResponseContainer()
|
||||
j, errJson := SerializeMiddlewareResponseIntoJson(container)
|
||||
assert.Equal(t, true, errJson == nil)
|
||||
cToDeserialize := &testResponseContainer{}
|
||||
require.NoError(t, json.Unmarshal(j, cToDeserialize))
|
||||
assert.Equal(t, "test string", cToDeserialize.TestString)
|
||||
}
|
||||
|
||||
func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
t.Run("GET", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
grpc.WithPrefix(grpc.HttpCodeMetadataKey): []string{"204"},
|
||||
grpc.WithPrefix(api.VersionHeader): []string{"capella"},
|
||||
},
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
v, ok := writer.Header()["Foo"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "foo", v[0])
|
||||
v, ok = writer.Header()["Content-Length"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "224", v[0])
|
||||
v, ok = writer.Header()["Eth-Consensus-Version"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
assert.Equal(t, "capella", v[0])
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
|
||||
})
|
||||
|
||||
t.Run("GET_no_grpc_status_code_header", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
})
|
||||
|
||||
t.Run("GET_invalid_status_code", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{"Grpc-Metadata-Eth-Consensus-Version": []string{"capella"}},
|
||||
}
|
||||
|
||||
// Set invalid status code.
|
||||
response.Header[grpc.WithPrefix(grpc.HttpCodeMetadataKey)] = []string{"invalid"}
|
||||
response.Header[grpc.WithPrefix(api.VersionHeader)] = []string{"capella"}
|
||||
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not parse status code"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("POST", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
})
|
||||
|
||||
t.Run("POST_with_response_body", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
|
||||
})
|
||||
|
||||
t.Run("POST_with_empty_json_body", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
responseJson, err := json.Marshal(struct{}{})
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, []byte(nil), writer.Body.Bytes())
|
||||
assert.Equal(t, "0", writer.Header()["Content-Length"][0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteError(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
responseHeader := http.Header{
|
||||
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"{\"CustomField\":\"bar\"}"},
|
||||
}
|
||||
errJson := &testErrorJson{
|
||||
Message: "foo",
|
||||
Code: 500,
|
||||
}
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
WriteError(writer, errJson, responseHeader)
|
||||
v, ok := writer.Header()["Content-Length"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "48", v[0])
|
||||
v, ok = writer.Header()["Content-Type"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "application/json", v[0])
|
||||
assert.Equal(t, 500, writer.Code)
|
||||
eDeserialize := &testErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), eDeserialize))
|
||||
assert.Equal(t, "foo", eDeserialize.Message)
|
||||
assert.Equal(t, 500, eDeserialize.Code)
|
||||
assert.Equal(t, "bar", eDeserialize.CustomField)
|
||||
})
|
||||
|
||||
t.Run("invalid_custom_error_header", func(t *testing.T) {
|
||||
logHook := test.NewGlobal()
|
||||
|
||||
responseHeader := http.Header{
|
||||
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"invalid"},
|
||||
}
|
||||
|
||||
WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)
|
||||
assert.LogsContain(t, logHook, "Could not unmarshal custom error message")
|
||||
})
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ---------------
|
||||
// Error handling.
|
||||
// ---------------
|
||||
|
||||
// ErrorJson describes common functionality of all JSON error representations.
|
||||
type ErrorJson interface {
|
||||
StatusCode() int
|
||||
SetCode(code int)
|
||||
Msg() string
|
||||
SetMsg(msg string)
|
||||
}
|
||||
|
||||
// DefaultErrorJson is a JSON representation of a simple error value, containing only a message and an error code.
|
||||
type DefaultErrorJson struct {
|
||||
Message string `json:"message"`
|
||||
Code int `json:"code"`
|
||||
}
|
||||
|
||||
// InternalServerErrorWithMessage returns a DefaultErrorJson with 500 code and a custom message.
|
||||
func InternalServerErrorWithMessage(err error, message string) *DefaultErrorJson {
|
||||
e := errors.Wrapf(err, message)
|
||||
return &DefaultErrorJson{
|
||||
Message: e.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
|
||||
// InternalServerError returns a DefaultErrorJson with 500 code.
|
||||
func InternalServerError(err error) *DefaultErrorJson {
|
||||
return &DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
|
||||
func TimeoutError() *DefaultErrorJson {
|
||||
return &DefaultErrorJson{
|
||||
Message: "Request timeout",
|
||||
Code: http.StatusRequestTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// StatusCode returns the error's underlying error code.
|
||||
func (e *DefaultErrorJson) StatusCode() int {
|
||||
return e.Code
|
||||
}
|
||||
|
||||
// Msg returns the error's underlying message.
|
||||
func (e *DefaultErrorJson) Msg() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// SetCode sets the error's underlying error code.
|
||||
func (e *DefaultErrorJson) SetCode(code int) {
|
||||
e.Code = code
|
||||
}
|
||||
|
||||
// SetMsg sets the error's underlying message.
|
||||
func (e *DefaultErrorJson) SetMsg(msg string) {
|
||||
e.Message = msg
|
||||
}
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime"
|
||||
"github.com/rs/cors"
|
||||
"google.golang.org/grpc"
|
||||
@@ -34,7 +33,6 @@ type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.Clie
|
||||
|
||||
// MuxHandler is a function that implements the mux handler functionality.
|
||||
type MuxHandler func(
|
||||
apiMiddlewareHandler *apimiddleware.ApiProxyMiddleware,
|
||||
h http.HandlerFunc,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
@@ -42,16 +40,15 @@ type MuxHandler func(
|
||||
|
||||
// Config parameters for setting up the gateway service.
|
||||
type config struct {
|
||||
maxCallRecvMsgSize uint64
|
||||
remoteCert string
|
||||
gatewayAddr string
|
||||
remoteAddr string
|
||||
allowedOrigins []string
|
||||
apiMiddlewareEndpointFactory apimiddleware.EndpointFactory
|
||||
muxHandler MuxHandler
|
||||
pbHandlers []*PbMux
|
||||
router *mux.Router
|
||||
timeout time.Duration
|
||||
maxCallRecvMsgSize uint64
|
||||
remoteCert string
|
||||
gatewayAddr string
|
||||
remoteAddr string
|
||||
allowedOrigins []string
|
||||
muxHandler MuxHandler
|
||||
pbHandlers []*PbMux
|
||||
router *mux.Router
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
|
||||
@@ -60,7 +57,6 @@ type Gateway struct {
|
||||
conn *grpc.ClientConn
|
||||
server *http.Server
|
||||
cancel context.CancelFunc
|
||||
proxy *apimiddleware.ApiProxyMiddleware
|
||||
ctx context.Context
|
||||
startFailure error
|
||||
}
|
||||
@@ -110,13 +106,9 @@ func (g *Gateway) Start() {
|
||||
|
||||
corsMux := g.corsMiddleware(g.cfg.router)
|
||||
|
||||
if g.cfg.apiMiddlewareEndpointFactory != nil && !g.cfg.apiMiddlewareEndpointFactory.IsNil() {
|
||||
g.registerApiMiddleware()
|
||||
}
|
||||
|
||||
if g.cfg.muxHandler != nil {
|
||||
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
g.cfg.muxHandler(g.proxy, corsMux.ServeHTTP, w, r)
|
||||
g.cfg.muxHandler(corsMux.ServeHTTP, w, r)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -229,13 +221,3 @@ func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn,
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
}
|
||||
|
||||
func (g *Gateway) registerApiMiddleware() {
|
||||
g.proxy = &apimiddleware.ApiProxyMiddleware{
|
||||
GatewayAddress: g.cfg.gatewayAddr,
|
||||
EndpointCreator: g.cfg.apiMiddlewareEndpointFactory,
|
||||
Timeout: g.cfg.timeout,
|
||||
}
|
||||
log.Info("Starting API middleware")
|
||||
g.proxy.Run(g.cfg.router)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -18,36 +17,18 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type mockEndpointFactory struct {
|
||||
}
|
||||
|
||||
func (*mockEndpointFactory) Paths() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (*mockEndpointFactory) Create(_ string) (*apimiddleware.Endpoint, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEndpointFactory) IsNil() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func TestGateway_Customized(t *testing.T) {
|
||||
r := mux.NewRouter()
|
||||
cert := "cert"
|
||||
origins := []string{"origin"}
|
||||
size := uint64(100)
|
||||
endpointFactory := &mockEndpointFactory{}
|
||||
|
||||
opts := []Option{
|
||||
WithRouter(r),
|
||||
WithRemoteCert(cert),
|
||||
WithAllowedOrigins(origins),
|
||||
WithMaxCallRecvMsgSize(size),
|
||||
WithApiMiddleware(endpointFactory),
|
||||
WithMuxHandler(func(
|
||||
_ *apimiddleware.ApiProxyMiddleware,
|
||||
_ http.HandlerFunc,
|
||||
_ http.ResponseWriter,
|
||||
_ *http.Request,
|
||||
@@ -63,7 +44,6 @@ func TestGateway_Customized(t *testing.T) {
|
||||
require.Equal(t, 1, len(g.cfg.allowedOrigins))
|
||||
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
|
||||
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
|
||||
assert.Equal(t, endpointFactory, g.cfg.apiMiddlewareEndpointFactory)
|
||||
}
|
||||
|
||||
func TestGateway_StartStop(t *testing.T) {
|
||||
@@ -83,7 +63,6 @@ func TestGateway_StartStop(t *testing.T) {
|
||||
WithGatewayAddr(gatewayAddress),
|
||||
WithRemoteAddr(selfAddress),
|
||||
WithMuxHandler(func(
|
||||
_ *apimiddleware.ApiProxyMiddleware,
|
||||
_ http.HandlerFunc,
|
||||
_ http.ResponseWriter,
|
||||
_ *http.Request,
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
)
|
||||
|
||||
type Option func(g *Gateway) error
|
||||
@@ -70,14 +69,6 @@ func WithMaxCallRecvMsgSize(size uint64) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithApiMiddleware allows adding an API middleware proxy to the gateway.
|
||||
func WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) Option {
|
||||
return func(g *Gateway) error {
|
||||
g.cfg.apiMiddlewareEndpointFactory = endpointFactory
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout allows changing the timeout value for API calls.
|
||||
func WithTimeout(seconds uint64) Option {
|
||||
return func(g *Gateway) error {
|
||||
|
||||
@@ -99,26 +99,22 @@ go_library(
|
||||
],
|
||||
)
|
||||
|
||||
test_suite(
|
||||
name = "go_default_test",
|
||||
tests = [
|
||||
":go_raceoff_test",
|
||||
":go_raceon_test",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_raceoff_test",
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"blockchain_test.go",
|
||||
"chain_info_norace_test.go",
|
||||
"chain_info_test.go",
|
||||
"checktags_test.go",
|
||||
"error_test.go",
|
||||
"execution_engine_test.go",
|
||||
"forkchoice_update_execution_test.go",
|
||||
"head_sync_committee_info_test.go",
|
||||
"head_test.go",
|
||||
"init_sync_process_block_test.go",
|
||||
"init_test.go",
|
||||
"lightclient_test.go",
|
||||
"log_test.go",
|
||||
"metrics_test.go",
|
||||
"mock_test.go",
|
||||
@@ -127,98 +123,66 @@ go_test(
|
||||
"process_block_test.go",
|
||||
"receive_attestation_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_norace_test.go",
|
||||
"service_test.go",
|
||||
"setup_test.go",
|
||||
"weak_subjectivity_checks_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
gotags = ["develop"],
|
||||
tags = ["CI_race_detection"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/blocks/testing:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_raceon_test",
|
||||
srcs = [
|
||||
"chain_info_norace_test.go",
|
||||
"checktags_test.go",
|
||||
"init_test.go",
|
||||
"mock_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_norace_test.go",
|
||||
"setup_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
gc_goopts = [
|
||||
# Go 1.14 enables checkptr by default when building with -race or -msan. There is a pointer
|
||||
# issue in boltdb, so must disable checkptr at compile time. This flag can be removed once
|
||||
# the project is migrated to etcd's version of boltdb and the issue has been fixed.
|
||||
# See: https://github.com/etcd-io/bbolt/issues/187.
|
||||
"-d=checkptr=0",
|
||||
],
|
||||
gotags = ["develop"],
|
||||
race = "on",
|
||||
tags = ["race_on"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/blocks/testing:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -464,6 +464,13 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
|
||||
return !isCanonical, nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch wraps the corresponding method in forkchoice
|
||||
func (s *Service) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.TargetRootForEpoch(root, epoch)
|
||||
}
|
||||
|
||||
// Ancestor returns the block root of an ancestry block from the input block root.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -26,6 +27,8 @@ go_test(
|
||||
deps = [
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -31,6 +32,11 @@ func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidec
|
||||
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
|
||||
}
|
||||
|
||||
// VerifyROBlobCommitment is a helper that massages the fields of an ROBlob into the types needed to call VerifyBlobKZGProof.
|
||||
func VerifyROBlobCommitment(sc blocks.ROBlob) error {
|
||||
return kzgContext.VerifyBlobKZGProof(bytesToBlob(sc.Blob), bytesToCommitment(sc.KzgCommitment), bytesToKZGProof(sc.KzgProof))
|
||||
}
|
||||
|
||||
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
|
||||
copy(ret[:], blob)
|
||||
return
|
||||
|
||||
@@ -1,13 +1,63 @@
|
||||
package kzg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func GetRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func GetRandBlob(seed int64) GoKZG.Blob {
|
||||
var blob GoKZG.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := GetRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
|
||||
commitment, err := kzgContext.BlobToKZGCommitment(blob, 0)
|
||||
if err != nil {
|
||||
return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
|
||||
}
|
||||
proof, err := kzgContext.ComputeBlobKZGProof(blob, commitment, 0)
|
||||
if err != nil {
|
||||
return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
|
||||
}
|
||||
return commitment, proof, err
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0)
|
||||
commitments := make([][]byte, 0)
|
||||
@@ -23,3 +73,13 @@ func TestBytesToAny(t *testing.T) {
|
||||
require.DeepEqual(t, commitment, bytesToCommitment(bytes))
|
||||
require.DeepEqual(t, proof, bytesToKZGProof(bytes))
|
||||
}
|
||||
|
||||
func TestGenerateCommitmentAndProof(t *testing.T) {
|
||||
blob := GetRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
|
||||
expectedProof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
|
||||
require.Equal(t, expectedCommitment, commitment)
|
||||
require.Equal(t, expectedProof, proof)
|
||||
}
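A further sketch, not part of the change set above: the helpers can be round-tripped through the library's verifier to sanity-check commitment and proof generation. It assumes kzgContext has been initialized by the package, as the existing tests already rely on.

func TestGenerateCommitmentAndProofRoundTrip(t *testing.T) {
	blob := GetRandBlob(42)
	commitment, proof, err := GenerateCommitmentAndProof(blob)
	require.NoError(t, err)
	// VerifyBlobKZGProof returns nil only when the proof matches the commitment for this blob.
	require.NoError(t, kzgContext.VerifyBlobKZGProof(blob, commitment, proof))
}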
|
||||
|
||||
@@ -53,15 +53,11 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
|
||||
|
||||
// VerifyLmdFfgConsistency verifies that the attestation's LMD and FFG votes are consistent with each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
|
||||
targetSlot, err := slots.EpochStart(a.Data.Target.Epoch)
|
||||
r, err := s.TargetRootForEpoch([32]byte(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r, err := s.Ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(a.Data.Target.Root, r) {
|
||||
if !bytes.Equal(a.Data.Target.Root, r[:]) {
|
||||
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -36,50 +36,28 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
f := service.cfg.ForkChoiceStore
|
||||
fc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, r32, err := prepareForkchoiceState(ctx, 32, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, fc, fc)
|
||||
require.NoError(t, err)
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, f.InsertNode(ctx, state, r32))
|
||||
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r33))
|
||||
|
||||
wanted := "FFG and LMD votes are not consistent"
|
||||
a := util.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = []byte{'a'}
|
||||
a.Data.Target.Root = []byte{'c'}
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
a := util.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = r32[:]
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
err = service.VerifyLmdFfgConsistency(context.Background(), a)
|
||||
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -2,7 +2,6 @@ package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
@@ -11,7 +10,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -27,7 +25,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
@@ -90,7 +88,7 @@ func (s *Service) Stop() error {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock submits a blinded block to the builder relay network.
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
@@ -100,11 +98,8 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
|
||||
if s.c == nil {
|
||||
return nil, nil, ErrNoBuilder
|
||||
}
|
||||
if uint64(len(blobs)) > fieldparams.MaxBlobsPerBlock {
|
||||
return nil, nil, fmt.Errorf("blob count %d beyond max limit of %d", len(blobs), fieldparams.MaxBlobsPerBlock)
|
||||
}
|
||||
|
||||
return s.c.SubmitBlindedBlock(ctx, b, blobs)
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
}
|
||||
|
||||
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
|
||||
@@ -62,7 +62,7 @@ func Test_BuilderMethodsWithouClient(t *testing.T) {
|
||||
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
|
||||
_, _, err = s.SubmitBlindedBlock(context.Background(), nil, nil)
|
||||
_, _, err = s.SubmitBlindedBlock(context.Background(), nil)
|
||||
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
|
||||
|
||||
err = s.RegisterValidator(context.Background(), nil)
|
||||
|
||||
@@ -45,7 +45,7 @@ func (s *MockBuilderService) Configured() bool {
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
switch b.Version() {
|
||||
case version.Bellatrix:
|
||||
w, err := blocks.WrappedExecutionPayload(s.Payload)
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
|
||||
@@ -134,6 +135,10 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature(t *testing.T) {
|
||||
undo := util.HackDenebMaxuint(t)
|
||||
defer undo()
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error)
|
||||
@@ -241,11 +246,11 @@ func TestVerifyExitAndSignature(t *testing.T) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().DenebForkVersion,
|
||||
Epoch: primitives.Epoch(2),
|
||||
Epoch: params.BeaconConfig().DenebForkEpoch,
|
||||
}
|
||||
signedExit := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 2,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
}
|
||||
@@ -253,7 +258,7 @@ func TestVerifyExitAndSignature(t *testing.T) {
|
||||
bs, err := state_native.InitializeFromProtoUnsafeDeneb(ðpb.BeaconStateDeneb{
|
||||
GenesisValidatorsRoot: bs.GenesisValidatorsRoot(),
|
||||
Fork: fork,
|
||||
Slot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
|
||||
Slot: denebSlot,
|
||||
Validators: bs.Validators(),
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -58,8 +58,8 @@ go_test(
|
||||
"weak_subjectivity_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
race = "on",
|
||||
shard_count = 2,
|
||||
tags = ["CI_race_detection"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
|
||||
@@ -16,7 +16,6 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
// ForkVersionByteLength length of fork version byte array.
|
||||
@@ -57,18 +56,22 @@ const (
|
||||
|
||||
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
|
||||
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
|
||||
fork := st.Fork()
|
||||
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
|
||||
}
|
||||
|
||||
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
|
||||
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
|
||||
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
|
||||
// This allows for signed validator exits to be valid forever.
|
||||
if st.Version() >= version.Deneb && domain == params.BeaconConfig().DomainVoluntaryExit {
|
||||
if domain == params.BeaconConfig().DomainVoluntaryExit && epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
fork = ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
d, err := Domain(fork, epoch, domain, st.GenesisValidatorsRoot())
|
||||
d, err := Domain(fork, epoch, domain, vr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -102,8 +105,14 @@ func Data(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
return ComputeSigningRootForRoot(objRoot, domain)
|
||||
}
|
||||
|
||||
// ComputeSigningRootForRoot works the same as ComputeSigningRoot,
|
||||
// except that it gets the root from an argument instead of a callback.
func ComputeSigningRootForRoot(root [32]byte, domain []byte) ([32]byte, error) {
|
||||
container := ðpb.SigningData{
|
||||
ObjectRoot: objRoot[:],
|
||||
ObjectRoot: root[:],
|
||||
Domain: domain,
|
||||
}
|
||||
return container.HashTreeRoot()
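A minimal sketch of using the new helper from a test, matching the EIP-7044 handling above. It assumes the package lives at beacon-chain/core/signing and that bls.RandKey returns a key and an error; the fork values, epoch, and validator index are illustrative only.

package signing_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/crypto/bls"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestSignExitWithoutState(t *testing.T) {
	// Per EIP-7044 the voluntary exit domain is pinned to Capella, so this fork
	// value keeps producing valid signatures after Deneb.
	fork := &ethpb.Fork{
		PreviousVersion: params.BeaconConfig().CapellaForkVersion,
		CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
		Epoch:           params.BeaconConfig().CapellaForkEpoch,
	}
	exit := &ethpb.VoluntaryExit{Epoch: params.BeaconConfig().CapellaForkEpoch, ValidatorIndex: 0}
	key, err := bls.RandKey()
	require.NoError(t, err)
	var genesisValidatorsRoot [32]byte // zero root, for illustration only
	sig, err := signing.ComputeDomainAndSignWithoutState(
		fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit,
		genesisValidatorsRoot[:], exit, key,
	)
	require.NoError(t, err)
	require.Equal(t, 96, len(sig)) // a BLS signature is 96 bytes
}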
|
||||
|
||||
@@ -3,16 +3,9 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
)
|
||||
|
||||
// NewDB initializes a new DB.
|
||||
func NewDB(ctx context.Context, dirPath string) (Database, error) {
|
||||
return kv.NewKVStore(ctx, dirPath)
|
||||
}
|
||||
|
||||
// NewFileName uses the KVStoreDatafilePath so that if this layer of
|
||||
// indirection between db.NewDB->kv.NewKVStore ever changes, it will be easy to remember
|
||||
// to also change this filename indirection at the same time.
|
||||
|
||||
@@ -11,10 +11,14 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
@@ -28,9 +32,12 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -1,19 +1,26 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/io/file"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/logging"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
@@ -26,24 +33,40 @@ const (
|
||||
sszExt = "ssz"
|
||||
partExt = "part"
|
||||
|
||||
bufferEpochs = 2
|
||||
directoryPermissions = 0700
|
||||
)
|
||||
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
type BlobStorageOption func(*BlobStorage)
|
||||
|
||||
// WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
|
||||
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
|
||||
return func(b *BlobStorage) {
|
||||
b.retentionEpochs = e
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(base string) (*BlobStorage, error) {
|
||||
func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
base = path.Clean(base)
|
||||
if err := file.MkdirAll(base); err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
|
||||
}
|
||||
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
|
||||
return &BlobStorage{fs: fs}, nil
|
||||
b := &BlobStorage{fs: fs, retentionEpochs: params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest}
|
||||
for _, o := range opts {
|
||||
o(b)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
|
||||
type BlobStorage struct {
|
||||
fs afero.Fs
|
||||
fs afero.Fs
|
||||
retentionEpochs primitives.Epoch
|
||||
}
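A minimal usage sketch of the new functional option; the import path beacon-chain/db/filesystem, the directory, and the doubled retention value are assumptions for illustration only.

package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v4/config/params"
)

func main() {
	// With no option the retention window defaults to MinEpochsForBlobsSidecarsRequest.
	bs, err := filesystem.NewBlobStorage(
		"/tmp/prysm-blobs",
		filesystem.WithBlobRetentionEpochs(2*params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = bs // hand the storage to the node and call Prune as slots advance
}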
|
||||
|
||||
// Save saves blobs given a list of sidecars.
|
||||
@@ -177,3 +200,75 @@ func (p blobNamer) partPath() string {
|
||||
func (p blobNamer) path() string {
|
||||
return p.fname(sszExt)
|
||||
}
|
||||
|
||||
// Prune prunes blobs in the base directory based on the retention epoch.
|
||||
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
|
||||
// This keeps a slight buffer so that blobs are only deleted once they are retentionEpochs+2 epochs old.
func (bs *BlobStorage) Prune(currentSlot primitives.Slot) error {
|
||||
retentionSlots, err := slots.EpochStart(bs.retentionEpochs + bufferEpochs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if currentSlot < retentionSlots {
|
||||
return nil // Overflow would occur
|
||||
}
|
||||
|
||||
folders, err := afero.ReadDir(bs.fs, ".")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, folder := range folders {
|
||||
if folder.IsDir() {
|
||||
if err := bs.processFolder(folder, currentSlot, retentionSlots); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processFolder will delete the folder of blobs if the blob slot is outside the
|
||||
// retention period. We determine the slot by looking at the first blob in the folder.
|
||||
func (bs *BlobStorage) processFolder(folder os.FileInfo, currentSlot, retentionSlots primitives.Slot) error {
|
||||
f, err := bs.fs.Open(filepath.Join(folder.Name(), "0."+sszExt))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log.WithError(err).Errorf("Could not close blob file")
|
||||
}
|
||||
}()
|
||||
|
||||
slot, err := slotFromBlob(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if slot < (currentSlot - retentionSlots) {
|
||||
if err = bs.fs.RemoveAll(folder.Name()); err != nil {
|
||||
return errors.Wrapf(err, "failed to delete blob %s", f.Name())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// slotFromBlob reads the ssz-encoded data of a file at a fixed offset (8 + 131072 + 48 + 48 = 131176 bytes),
// which is the combined size of the BlobSidecar fields preceding the slot
// inside the embedded SignedBeaconBlockHeader.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
|
||||
b := make([]byte, 8)
|
||||
_, err := at.ReadAt(b, 131176)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rawSlot := binary.LittleEndian.Uint64(b)
|
||||
return primitives.Slot(rawSlot), nil
|
||||
}
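A sketch of how the 131176-byte offset above breaks down; the constant names are illustrative and the sizes assume mainnet field parameters, as stated in the comment.

const (
	indexSize      = 8      // uint64 blob index
	blobSize       = 131072 // 4096 field elements of 32 bytes each
	commitmentSize = 48     // KZG commitment
	proofSize      = 48     // KZG proof
	// The slot is the first field of the BeaconBlockHeader inside the
	// SignedBeaconBlockHeader, which follows the fields above.
	slotOffset = indexSize + blobSize + commitmentSize + proofSize // 131176
)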
|
||||
|
||||
// Delete removes the directory matching the provided block root and all the blobs it contains.
|
||||
func (bs *BlobStorage) Delete(root [32]byte) error {
|
||||
if err := bs.fs.RemoveAll(hexutil.Encode(root[:])); err != nil {
|
||||
return fmt.Errorf("failed to delete blobs for root %#x: %w", root, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,16 +2,19 @@ package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/spf13/afero"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
@@ -20,7 +23,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("no error for duplicate", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
existingSidecar := testSidecars[0]
|
||||
|
||||
blobPath := namerForSidecar(existingSidecar).path()
|
||||
@@ -70,7 +74,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlobIndicesBounds(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
root := [32]byte{}
|
||||
|
||||
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
|
||||
@@ -98,3 +103,106 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fh.Close())
|
||||
}
|
||||
|
||||
func TestBlobStoragePrune(t *testing.T) {
|
||||
currentSlot := primitives.Slot(200000)
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("PruneOne", func(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, sidecar := range testSidecars {
|
||||
require.NoError(t, bs.Save(sidecar))
|
||||
}
|
||||
|
||||
require.NoError(t, bs.Prune(currentSlot))
|
||||
|
||||
remainingFolders, err := afero.ReadDir(fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remainingFolders))
|
||||
})
|
||||
t.Run("PruneMany", func(t *testing.T) {
|
||||
blockQty := 10
|
||||
slot := primitives.Slot(0)
|
||||
|
||||
for j := 0; j <= blockQty; j++ {
|
||||
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, fieldparams.MaxBlobsPerBlock)
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bs.Save(testSidecars[0]))
|
||||
|
||||
slot += 10000
|
||||
}
|
||||
|
||||
require.NoError(t, bs.Prune(currentSlot))
|
||||
|
||||
remainingFolders, err := afero.ReadDir(fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(remainingFolders))
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkPruning(b *testing.B) {
|
||||
var t *testing.T
|
||||
_, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockQty := 10000
|
||||
currentSlot := primitives.Slot(150000)
|
||||
slot := primitives.Slot(0)
|
||||
|
||||
for j := 0; j <= blockQty; j++ {
|
||||
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, fieldparams.MaxBlobsPerBlock)
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bs.Save(testSidecars[0]))
|
||||
|
||||
slot += 100
|
||||
}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := bs.Prune(currentSlot)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobStorageDelete(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
rawRoot := "0xcf9bb70c98f58092c9d6459227c9765f984d240be9690e85179bc5a6f60366ad"
|
||||
blockRoot, err := hexutil.Decode(rawRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
for _, sidecar := range testSidecars {
|
||||
require.NoError(t, bs.Save(sidecar))
|
||||
}
|
||||
|
||||
exists, err := afero.DirExists(fs, hexutil.Encode(blockRoot))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, exists)
|
||||
|
||||
// Delete the directory corresponding to the block root
|
||||
require.NoError(t, bs.Delete(bytesutil.ToBytes32(blockRoot)))
|
||||
|
||||
// Ensure that the directory no longer exists after deletion
|
||||
exists, err = afero.DirExists(fs, hexutil.Encode(blockRoot))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, exists)
|
||||
|
||||
// Deleting a non-existent root does not return an error.
|
||||
require.NoError(t, bs.Delete(bytesutil.ToBytes32([]byte{0x1})))
|
||||
}
|
||||
|
||||
func TestNewBlobStorage(t *testing.T) {
|
||||
_, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package filesystem
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
@@ -15,9 +16,10 @@ func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
|
||||
|
||||
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
|
||||
// in order to interact with it outside the parameters of the BlobStorage api.
|
||||
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage) {
|
||||
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage, error) {
|
||||
fs := afero.NewMemMapFs()
|
||||
return fs, &BlobStorage{fs: fs}
|
||||
retentionEpoch := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
return fs, &BlobStorage{fs: fs, retentionEpochs: retentionEpoch}, nil
|
||||
}
|
||||
|
||||
type BlobMocker struct {
|
||||
@@ -44,7 +46,7 @@ func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices []uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewEpehmeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
|
||||
// NewEphemeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
|
||||
// BlobMocker encapsulates blob path construction to avoid leaking implementation details.
func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage) {
|
||||
fs := afero.NewMemMapFs()
|
||||
|
||||
@@ -13,7 +13,6 @@ go_library(
|
||||
"error.go",
|
||||
"execution_chain.go",
|
||||
"finalized_block_roots.go",
|
||||
"flags.go",
|
||||
"genesis.go",
|
||||
"key.go",
|
||||
"kv.go",
|
||||
@@ -39,7 +38,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -67,7 +65,6 @@ go_library(
|
||||
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
|
||||
"@com_github_schollz_progressbar_v3//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
@@ -86,7 +83,6 @@ go_test(
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
"finalized_block_roots_test.go",
|
||||
"flags_test.go",
|
||||
"genesis_test.go",
|
||||
"init_test.go",
|
||||
"kv_test.go",
|
||||
@@ -107,7 +103,6 @@ go_test(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -125,7 +120,6 @@ go_test(
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -3,7 +3,6 @@ package kv
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -63,7 +62,7 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlob
|
||||
defer span.End()
|
||||
|
||||
first := scs[0]
|
||||
newKey := blobSidecarKey(first)
|
||||
newKey := s.blobSidecarKey(first)
|
||||
prefix := newKey.BufferPrefix()
|
||||
var prune []blobRotatingKey
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
@@ -235,7 +234,7 @@ func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices
|
||||
defer span.End()
|
||||
|
||||
var enc []byte
|
||||
sk := slotKey(slot)
|
||||
sk := s.slotKey(slot)
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket(blobsBucket).Cursor()
|
||||
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
|
||||
@@ -281,32 +280,37 @@ func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte
|
||||
|
||||
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
|
||||
func blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey {
|
||||
key := slotKey(blob.Slot)
|
||||
func (s *Store) blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey {
|
||||
key := s.slotKey(blob.Slot)
|
||||
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
|
||||
key = append(key, blob.BlockRoot...)
|
||||
return key
|
||||
}
|
||||
|
||||
func slotKey(slot types.Slot) []byte {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
|
||||
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
|
||||
func (s *Store) slotKey(slot types.Slot) []byte {
|
||||
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(s.blobRetentionSlots()))
|
||||
}
|
||||
|
||||
func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
|
||||
func (s *Store) blobRetentionSlots() types.Slot {
|
||||
return types.Slot(s.blobRetentionEpochs.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
|
||||
}
|
||||
|
||||
var errBlobRetentionEpochMismatch = errors.New("epochs for blobs request value in DB does not match runtime config")
|
||||
|
||||
func (s *Store) checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
|
||||
uRetentionEpochs := uint64(s.blobRetentionEpochs)
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket(chainMetadataBucket)
|
||||
v := b.Get(blobRetentionEpochsKey)
|
||||
if v == nil {
|
||||
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(maxEpochsToPersistBlobs))); err != nil {
|
||||
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uRetentionEpochs)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
e := bytesutil.BytesToUint64BigEndian(v)
|
||||
if e != uint64(maxEpochsToPersistBlobs) {
|
||||
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, maxEpochsToPersistBlobs)
|
||||
if e != uRetentionEpochs {
|
||||
return errors.Wrapf(errBlobRetentionEpochMismatch, "db=%d, config=%d", e, uRetentionEpochs)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
|
||||
@@ -3,13 +3,10 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -18,7 +15,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -493,7 +489,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
scs := []*ethpb.DeprecatedBlobSidecar{
|
||||
{BlockRoot: r, Slot: primitives.Slot(i)},
|
||||
}
|
||||
k := blobSidecarKey(scs[0])
|
||||
k := s.blobSidecarKey(scs[0])
|
||||
encodedBlobSidecar, err := encode(ctx, ðpb.BlobSidecars{Sidecars: scs})
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, bkt.Put(k, encodedBlobSidecar))
|
||||
@@ -514,27 +510,23 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
}
|
||||
|
||||
func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
|
||||
dbStore := setupDB(t)
|
||||
s := setupDB(t)
|
||||
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check
|
||||
require.NoError(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db)) // First write
|
||||
require.NoError(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db)) // First check
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(42069, 10)))
|
||||
cliCtx := cli.NewContext(&cli.App{}, set, nil)
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
|
||||
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
|
||||
s.blobRetentionEpochs += 1
|
||||
require.ErrorIs(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db), errBlobRetentionEpochMismatch)
|
||||
}
|
||||
|
||||
func TestBlobRotatingKey(t *testing.T) {
|
||||
k := blobSidecarKey(ðpb.DeprecatedBlobSidecar{
|
||||
s := setupDB(t)
|
||||
k := s.blobSidecarKey(ðpb.DeprecatedBlobSidecar{
|
||||
Slot: 1,
|
||||
BlockRoot: []byte{2},
|
||||
})
|
||||
|
||||
require.Equal(t, types.Slot(1), k.Slot())
|
||||
require.DeepEqual(t, []byte{2}, k.BlockRoot())
|
||||
require.DeepEqual(t, slotKey(types.Slot(1)), k.BufferPrefix())
|
||||
require.DeepEqual(t, s.slotKey(types.Slot(1)), k.BufferPrefix())
|
||||
}
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
|
||||
// ConfigureBlobRetentionEpoch sets the epoch for blob retention based on command-line context. It sets the local config `maxEpochsToPersistBlobs`.
|
||||
// If the flag is not set, the spec default `MinEpochsForBlobsSidecarsRequest` is used.
|
||||
// An error if the input epoch is smaller than the spec default value.
|
||||
func ConfigureBlobRetentionEpoch(cliCtx *cli.Context) error {
|
||||
// Check if the blob retention epoch flag is set.
|
||||
if cliCtx.IsSet(flags.BlobRetentionEpoch.Name) {
|
||||
// Retrieve and cast the epoch value.
|
||||
epochValue := cliCtx.Uint64(flags.BlobRetentionEpoch.Name)
|
||||
e := primitives.Epoch(epochValue)
|
||||
|
||||
// Validate the epoch value against the spec default.
|
||||
if e < params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest {
|
||||
return fmt.Errorf("%s smaller than spec default, %d < %d", flags.BlobRetentionEpoch.Name, e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
}
|
||||
|
||||
maxEpochsToPersistBlobs = e
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func TestConfigureBlobRetentionEpoch(t *testing.T) {
|
||||
maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
params.SetupTestConfigCleanup(t)
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
|
||||
// Test case: Spec default.
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cli.NewContext(&app, set, nil)))
|
||||
require.Equal(t, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest, maxEpochsToPersistBlobs)
|
||||
|
||||
set.Uint64(flags.BlobRetentionEpoch.Name, 0, "")
|
||||
minEpochsForSidecarRequest := uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(2*minEpochsForSidecarRequest, 10)))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
// Test case: Input epoch is greater than or equal to spec value.
|
||||
require.NoError(t, ConfigureBlobRetentionEpoch(cliCtx))
|
||||
require.Equal(t, primitives.Epoch(2*minEpochsForSidecarRequest), maxEpochsToPersistBlobs)
|
||||
|
||||
// Test case: Input epoch is less than spec value.
|
||||
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(minEpochsForSidecarRequest-1, 10)))
|
||||
cliCtx = cli.NewContext(&app, set, nil)
|
||||
err := ConfigureBlobRetentionEpoch(cliCtx)
|
||||
require.ErrorContains(t, "blob-retention-epochs smaller than spec default", err)
|
||||
}
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/io/file"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
@@ -90,6 +91,7 @@ type Store struct {
|
||||
validatorEntryCache *ristretto.Cache
|
||||
stateSummaryCache *stateSummaryCache
|
||||
ctx context.Context
|
||||
blobRetentionEpochs primitives.Epoch
|
||||
}
|
||||
|
||||
// StoreDatafilePath is the canonical construction of a full
|
||||
@@ -133,10 +135,20 @@ var Buckets = [][]byte{
|
||||
blobsBucket,
|
||||
}
|
||||
|
||||
// KVStoreOption is a functional option that modifies a kv.Store.
|
||||
type KVStoreOption func(*Store)
|
||||
|
||||
// WithBlobRetentionEpochs sets the variable configuring the blob retention window.
|
||||
func WithBlobRetentionEpochs(e primitives.Epoch) KVStoreOption {
|
||||
return func(s *Store) {
|
||||
s.blobRetentionEpochs = e
|
||||
}
|
||||
}
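A brief usage sketch of the option, mirroring how setupDB wires it in the tests below; the helper name is illustrative and not part of the change set.

// Illustrative only: open the beacon DB with an explicit blob retention window.
func openStore(ctx context.Context, dataDir string, retention primitives.Epoch) (*Store, error) {
	return NewKVStore(ctx, dataDir, WithBlobRetentionEpochs(retention))
}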
|
||||
|
||||
// NewKVStore initializes a new boltDB key-value store at the directory
|
||||
// path specified, creates the kv-buckets based on the schema, and stores
|
||||
// an open connection db object as a property of the Store struct.
|
||||
func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
|
||||
func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*Store, error) {
|
||||
hasDir, err := file.HasDir(dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -189,6 +201,9 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
|
||||
stateSummaryCache: newStateSummaryCache(),
|
||||
ctx: ctx,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(kv)
|
||||
}
|
||||
if err := kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return createBuckets(tx, Buckets...)
|
||||
}); err != nil {
|
||||
@@ -202,10 +217,14 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
|
||||
if err := kv.checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
|
||||
}
|
||||
|
||||
// set a default so that tests don't break
|
||||
if kv.blobRetentionEpochs == 0 {
|
||||
kv.blobRetentionEpochs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||
}
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -15,7 +16,8 @@ import (
|
||||
|
||||
// setupDB instantiates and returns a Store instance.
|
||||
func setupDB(t testing.TB) *Store {
|
||||
db, err := NewKVStore(context.Background(), t.TempDir())
|
||||
opt := WithBlobRetentionEpochs(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
db, err := NewKVStore(context.Background(), t.TempDir(), opt)
|
||||
require.NoError(t, err, "Failed to instantiate DB")
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, db.Close(), "Failed to close database")
|
||||
|
||||
@@ -108,3 +108,10 @@ func WithFinalizedStateAtStartup(st state.BeaconState) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithJwtId(jwtId string) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.jwtId = jwtId
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,6 +128,7 @@ type config struct {
|
||||
currHttpEndpoint network.Endpoint
|
||||
headers []string
|
||||
finalizedStateAtStartup state.BeaconState
|
||||
jwtId string
|
||||
}
|
||||
|
||||
// Service fetches important information about the canonical
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
"ro.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice",
|
||||
visibility = [
|
||||
@@ -22,3 +23,15 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["ro_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -637,3 +637,41 @@ func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
|
||||
}
|
||||
return n.slot, nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch returns the root of the target block for a given epoch.
// The epoch parameter is crucial to identify the correct target root. For example:
// when inserting a block at slot 63 with block root 0xA and target root 0xB (pointing to the block at slot 32),
// and the block at slot 64 is skipped, an attestation for the next epoch will reference 0xA (the block at slot 63) as its target, not 0xB (the block at slot 32).
// This implies that if the requested epoch is later than the block's epoch, the target root is the block root itself.
// We also allow the epoch to be earlier than that of the current target for this root, in
// which case we return the root of the target checkpoint, at the given epoch, of the
// chain containing the passed root.
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
nodeEpoch := slots.ToEpoch(n.slot)
|
||||
if epoch > nodeEpoch {
|
||||
return n.root, nil
|
||||
}
|
||||
if n.target == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
targetRoot := n.target.root
|
||||
if epoch == nodeEpoch {
|
||||
return targetRoot, nil
|
||||
}
|
||||
targetNode, ok := f.store.nodeByRoot[targetRoot]
|
||||
if !ok || targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// If slot 0 of the epoch was not missed, move to the target's parent so that we go back at least one epoch.
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
|
||||
targetNode = targetNode.parent
|
||||
if targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
}
|
||||
return f.TargetRootForEpoch(targetNode.root, epoch)
|
||||
}
|
||||
|
||||
@@ -75,7 +75,6 @@ func (s *Store) insert(ctx context.Context,
|
||||
}
|
||||
|
||||
parent := s.nodeByRoot[parentRoot]
|
||||
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
@@ -89,6 +88,17 @@ func (s *Store) insert(ctx context.Context,
|
||||
timestamp: uint64(time.Now().Unix()),
|
||||
}
|
||||
|
||||
// Set the node's target checkpoint
|
||||
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
n.target = n
|
||||
} else if parent != nil {
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
|
||||
n.target = parent.target
|
||||
} else {
|
||||
n.target = parent
|
||||
}
|
||||
}
|
||||
|
||||
s.nodeByPayload[payloadHash] = n
|
||||
s.nodeByRoot[root] = n
|
||||
if parent == nil {
|
||||
@@ -145,6 +155,9 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
return ctx.Err()
|
||||
}
|
||||
if node == finalizedNode {
|
||||
if node.target != node {
|
||||
node.target = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, child := range node.children {
|
||||
|
||||
@@ -435,3 +435,90 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), count)
|
||||
}
|
||||
|
||||
func TestStore_TargetRootForEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
target, err := f.TargetRootForEpoch(blkRoot, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, blkRoot)
|
||||
|
||||
state, root1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blkRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root1))
|
||||
target, err = f.TargetRootForEpoch(root1, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, blkRoot)
|
||||
|
||||
// Insert a block for the next epoch (missed slot 0)
|
||||
|
||||
state, root2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, root1, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root2))
|
||||
target, err = f.TargetRootForEpoch(root2, 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root1)
|
||||
|
||||
state, root3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, root2, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root3))
|
||||
target, err = f.TargetRootForEpoch(root2, 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root1)
|
||||
|
||||
// Prune finalization
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = root1
|
||||
require.NoError(t, s.prune(ctx))
|
||||
target, err = f.TargetRootForEpoch(root1, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, target)
|
||||
|
||||
// Insert a block for next epoch (slot 0 present)
|
||||
|
||||
state, root4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, root1, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root4))
|
||||
target, err = f.TargetRootForEpoch(root4, 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root4)
|
||||
|
||||
state, root5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, root4, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root5))
|
||||
target, err = f.TargetRootForEpoch(root5, 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root4)
|
||||
|
||||
// Target root where the target epoch is same or ahead of the block slot
|
||||
target, err = f.TargetRootForEpoch(root5, 4)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root5)
|
||||
|
||||
// Target root where the target epoch is two epochs ago
|
||||
target, err = f.TargetRootForEpoch(root5, 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root1, target) // the parent of root4 in epoch 3 is root1 in epoch 1
|
||||
|
||||
// Target root where the target is two epochs ago, slot 0 was missed
|
||||
state, root6, err := prepareForkchoiceState(ctx, 4*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'g'}, root5, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root6))
|
||||
target, err = f.TargetRootForEpoch(root6, 4)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root5)
|
||||
target, err = f.TargetRootForEpoch(root6, 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, target, root1)
|
||||
|
||||
// Prune finalization
|
||||
s.finalizedCheckpoint.Root = root4
|
||||
require.NoError(t, s.prune(ctx))
|
||||
target, err = f.TargetRootForEpoch(root4, 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root4, target)
|
||||
}
|
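The assertions above all exercise the same rule: the target root for an epoch, as seen from a given block, is the root of the highest ancestor whose slot is at or before the first slot of that epoch, and once that ancestor has been pruned the zero root is returned. A minimal sketch of that rule, using a simplified node type rather than Prysm's internal Node (which instead keeps a per-node target pointer, per the struct hunk below), might look like this:

// node is a simplified stand-in for forkchoice's internal block node, used
// only to illustrate the target-root rule exercised by the test above.
type node struct {
	slot   uint64
	root   [32]byte
	parent *node
}

// targetRootForEpoch walks back from n to the highest ancestor whose slot is
// at or before the first slot of the requested epoch. If the requested epoch
// is at or ahead of n's own epoch, n itself is the target; if the ancestor
// was pruned away, the zero root is returned.
func targetRootForEpoch(n *node, epoch, slotsPerEpoch uint64) [32]byte {
	epochStart := epoch * slotsPerEpoch
	for n != nil && n.slot > epochStart {
		n = n.parent
	}
	if n == nil {
		return [32]byte{}
	}
	return n.root
}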
||||
|
||||
@@ -50,6 +50,7 @@ type Node struct {
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *Node // parent index of this node.
|
||||
target *Node // target checkpoint node for this node.
|
||||
children []*Node // the list of direct children of this Node
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
|
||||
@@ -16,10 +16,9 @@ type BalancesByRooter func(context.Context, [32]byte) ([]uint64, error)
|
||||
|
||||
// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
|
||||
type ForkChoicer interface {
|
||||
RLocker // separate interface isolates read locking for ROForkChoice.
|
||||
Lock()
|
||||
Unlock()
|
||||
RLock()
|
||||
RUnlock()
|
||||
HeadRetriever // to compute head.
|
||||
BlockProcessor // to track new block for fork choice.
|
||||
AttestationProcessor // to track new attestation for fork choice.
|
||||
@@ -27,6 +26,12 @@ type ForkChoicer interface {
|
||||
Setter // to set fork choice information.
|
||||
}
|
||||
|
||||
// RLocker represents forkchoice's internal RWMutex read-only lock/unlock methods.
|
||||
type RLocker interface {
|
||||
RLock()
|
||||
RUnlock()
|
||||
}
|
||||
|
||||
// HeadRetriever retrieves head root and optimistic info of the current chain.
|
||||
type HeadRetriever interface {
|
||||
Head(context.Context) ([32]byte, error)
|
||||
@@ -47,28 +52,33 @@ type AttestationProcessor interface {
|
||||
|
||||
// Getter returns fork choice related information.
|
||||
type Getter interface {
|
||||
HasNode([32]byte) bool
|
||||
ProposerBoost() [fieldparams.RootLength]byte
|
||||
FastGetter
|
||||
AncestorRoot(ctx context.Context, root [32]byte, slot primitives.Slot) ([32]byte, error)
|
||||
CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, primitives.Slot, error)
|
||||
IsCanonical(root [32]byte) bool
|
||||
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
|
||||
FinalizedPayloadBlockHash() [32]byte
|
||||
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
JustifiedPayloadBlockHash() [32]byte
|
||||
UnrealizedJustifiedPayloadBlockHash() [32]byte
|
||||
NodeCount() int
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
ForkChoiceDump(context.Context) (*forkchoice2.Dump, error)
|
||||
Weight(root [32]byte) (uint64, error)
|
||||
Tips() ([][32]byte, []primitives.Slot)
|
||||
}
|
||||
|
||||
type FastGetter interface {
|
||||
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
FinalizedPayloadBlockHash() [32]byte
|
||||
HasNode([32]byte) bool
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
IsCanonical(root [32]byte) bool
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
|
||||
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
JustifiedPayloadBlockHash() [32]byte
|
||||
LastRoot(primitives.Epoch) [32]byte
|
||||
NodeCount() int
|
||||
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
ProposerBoost() [fieldparams.RootLength]byte
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
ShouldOverrideFCU() bool
|
||||
Slot([32]byte) (primitives.Slot, error)
|
||||
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
|
||||
UnrealizedJustifiedPayloadBlockHash() [32]byte
|
||||
Weight(root [32]byte) (uint64, error)
|
||||
}
|
||||
|
||||
// Setter allows to set forkchoice information
|
||||
|
||||
164
beacon-chain/forkchoice/ro.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// ROForkChoice is an implementation of forkchoice.FastGetter which calls `RLock`/`RUnlock`
|
||||
// around a delegated method call to the underlying Getter implementation.
|
||||
type ROForkChoice struct {
|
||||
getter FastGetter
|
||||
l RLocker
|
||||
}
|
||||
|
||||
var _ FastGetter = &ROForkChoice{}
|
||||
|
||||
// ROWrappable represents the subset of ForkChoicer a type needs to support
|
||||
// in order for ROForkChoice to wrap it. This simplifies the creation of a mock
|
||||
// type that can be used to assert that all of the wrapped methods are correctly
|
||||
// called between mutex acquire/release.
|
||||
type ROWrappable interface {
|
||||
RLocker
|
||||
FastGetter
|
||||
}
|
||||
|
||||
// NewROForkChoice returns an ROForkChoice that delegates forkchoice.Getter calls to the
|
||||
// given value after first using its Locker methods to make sure it is correctly locked.
|
||||
func NewROForkChoice(w ROWrappable) *ROForkChoice {
|
||||
return &ROForkChoice{getter: w, l: w}
|
||||
}
|
||||
|
||||
// HasNode delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HasNode(root [32]byte) bool {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.HasNode(root)
|
||||
}
|
||||
|
||||
// ProposerBoost delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.ProposerBoost()
|
||||
}
|
||||
|
||||
// IsCanonical delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) IsCanonical(root [32]byte) bool {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.IsCanonical(root)
|
||||
}
|
||||
|
||||
// FinalizedCheckpoint delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.FinalizedCheckpoint()
|
||||
}
|
||||
|
||||
// IsViableForCheckpoint delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.IsViableForCheckpoint(cp)
|
||||
}
|
||||
|
||||
// FinalizedPayloadBlockHash delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) FinalizedPayloadBlockHash() [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.FinalizedPayloadBlockHash()
|
||||
}
|
||||
|
||||
// JustifiedCheckpoint delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.JustifiedCheckpoint()
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpoint delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.PreviousJustifiedCheckpoint()
|
||||
}
|
||||
|
||||
// JustifiedPayloadBlockHash delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.JustifiedPayloadBlockHash()
|
||||
}
|
||||
|
||||
// UnrealizedJustifiedPayloadBlockHash delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.UnrealizedJustifiedPayloadBlockHash()
|
||||
}
|
||||
|
||||
// NodeCount delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) NodeCount() int {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.NodeCount()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockSlot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.HighestReceivedBlockSlot()
|
||||
}
|
||||
|
||||
// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.ReceivedBlocksLastEpoch()
|
||||
}
|
||||
|
||||
// Weight delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.Weight(root)
|
||||
}
|
||||
|
||||
// IsOptimistic delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.IsOptimistic(root)
|
||||
}
|
||||
|
||||
// ShouldOverrideFCU delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) ShouldOverrideFCU() bool {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.ShouldOverrideFCU()
|
||||
}
|
||||
|
||||
// Slot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.Slot(root)
|
||||
}
|
||||
|
||||
// LastRoot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.LastRoot(e)
|
||||
}
|
||||
|
||||
// TargetRootForEpoch delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.TargetRootForEpoch(root, epoch)
|
||||
}
|
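Because ROForkChoice only needs the RLocker and FastGetter halves of the full ForkChoicer, wrapping the concrete forkchoice value is a one-liner. A hedged sketch of the intended wiring, assuming doublylinkedtree.New() as the concrete implementation (as used elsewhere in the beacon node); this is illustrative, not code from this change:

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
)

// newReadOnlyForkchoice sketches how a caller exposes the read-only wrapper:
// every FastGetter call is delegated to fc under RLock/RUnlock, so readers
// never have to manage forkchoice locking themselves.
func newReadOnlyForkchoice() *forkchoice.ROForkChoice {
	fc := doublylinkedtree.New() // concrete ForkChoicer; assumed to satisfy ROWrappable
	return forkchoice.NewROForkChoice(fc)
}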
||||
282
beacon-chain/forkchoice/ro_test.go
Normal file
@@ -0,0 +1,282 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
type mockCall int
|
||||
|
||||
const (
|
||||
lockCalled mockCall = iota
|
||||
unlockCalled
|
||||
rlockCalled
|
||||
runlockCalled
|
||||
hasNodeCalled
|
||||
proposerBoostCalled
|
||||
isCanonicalCalled
|
||||
finalizedCheckpointCalled
|
||||
isViableForCheckpointCalled
|
||||
finalizedPayloadBlockHashCalled
|
||||
justifiedCheckpointCalled
|
||||
previousJustifiedCheckpointCalled
|
||||
justifiedPayloadBlockHashCalled
|
||||
unrealizedJustifiedPayloadBlockHashCalled
|
||||
nodeCountCalled
|
||||
highestReceivedBlockSlotCalled
|
||||
receivedBlocksLastEpochCalled
|
||||
weightCalled
|
||||
isOptimisticCalled
|
||||
shouldOverrideFCUCalled
|
||||
slotCalled
|
||||
lastRootCalled
|
||||
targetRootForEpochCalled
|
||||
)
|
||||
|
||||
func _discard(t *testing.T, e error) {
|
||||
if e != nil {
|
||||
_, err := io.Discard.Write([]byte(e.Error()))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ensures that the correct func was called with the correct lock pattern
|
||||
// for each method in the interface.
|
||||
func TestROLocking(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
call mockCall
|
||||
cb func(FastGetter)
|
||||
}{
|
||||
{
|
||||
name: "hasNodeCalled",
|
||||
call: hasNodeCalled,
|
||||
cb: func(g FastGetter) { g.HasNode([32]byte{}) },
|
||||
},
|
||||
{
|
||||
name: "proposerBoostCalled",
|
||||
call: proposerBoostCalled,
|
||||
cb: func(g FastGetter) { g.ProposerBoost() },
|
||||
},
|
||||
{
|
||||
name: "isCanonicalCalled",
|
||||
call: isCanonicalCalled,
|
||||
cb: func(g FastGetter) { g.IsCanonical([32]byte{}) },
|
||||
},
|
||||
{
|
||||
name: "finalizedCheckpointCalled",
|
||||
call: finalizedCheckpointCalled,
|
||||
cb: func(g FastGetter) { g.FinalizedCheckpoint() },
|
||||
},
|
||||
{
|
||||
name: "isViableForCheckpointCalled",
|
||||
call: isViableForCheckpointCalled,
|
||||
cb: func(g FastGetter) { _, err := g.IsViableForCheckpoint(nil); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "finalizedPayloadBlockHashCalled",
|
||||
call: finalizedPayloadBlockHashCalled,
|
||||
cb: func(g FastGetter) { g.FinalizedPayloadBlockHash() },
|
||||
},
|
||||
{
|
||||
name: "justifiedCheckpointCalled",
|
||||
call: justifiedCheckpointCalled,
|
||||
cb: func(g FastGetter) { g.JustifiedCheckpoint() },
|
||||
},
|
||||
{
|
||||
name: "previousJustifiedCheckpointCalled",
|
||||
call: previousJustifiedCheckpointCalled,
|
||||
cb: func(g FastGetter) { g.PreviousJustifiedCheckpoint() },
|
||||
},
|
||||
{
|
||||
name: "justifiedPayloadBlockHashCalled",
|
||||
call: justifiedPayloadBlockHashCalled,
|
||||
cb: func(g FastGetter) { g.JustifiedPayloadBlockHash() },
|
||||
},
|
||||
{
|
||||
name: "unrealizedJustifiedPayloadBlockHashCalled",
|
||||
call: unrealizedJustifiedPayloadBlockHashCalled,
|
||||
cb: func(g FastGetter) { g.UnrealizedJustifiedPayloadBlockHash() },
|
||||
},
|
||||
{
|
||||
name: "nodeCountCalled",
|
||||
call: nodeCountCalled,
|
||||
cb: func(g FastGetter) { g.NodeCount() },
|
||||
},
|
||||
{
|
||||
name: "highestReceivedBlockSlotCalled",
|
||||
call: highestReceivedBlockSlotCalled,
|
||||
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
|
||||
},
|
||||
{
|
||||
name: "receivedBlocksLastEpochCalled",
|
||||
call: receivedBlocksLastEpochCalled,
|
||||
cb: func(g FastGetter) { _, err := g.ReceivedBlocksLastEpoch(); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "weightCalled",
|
||||
call: weightCalled,
|
||||
cb: func(g FastGetter) { _, err := g.Weight([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "isOptimisticCalled",
|
||||
call: isOptimisticCalled,
|
||||
cb: func(g FastGetter) { _, err := g.IsOptimistic([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "shouldOverrideFCUCalled",
|
||||
call: shouldOverrideFCUCalled,
|
||||
cb: func(g FastGetter) { g.ShouldOverrideFCU() },
|
||||
},
|
||||
{
|
||||
name: "slotCalled",
|
||||
call: slotCalled,
|
||||
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "lastRootCalled",
|
||||
call: lastRootCalled,
|
||||
cb: func(g FastGetter) { g.LastRoot(0) },
|
||||
},
|
||||
{
|
||||
name: "targetRootForEpochCalled",
|
||||
call: targetRootForEpochCalled,
|
||||
cb: func(g FastGetter) { _, err := g.TargetRootForEpoch([32]byte{}, 0); _discard(t, err) },
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
m := &mockROForkchoice{}
|
||||
ro := NewROForkChoice(m)
|
||||
c.cb(ro)
|
||||
require.Equal(t, rlockCalled, m.calls[0])
|
||||
require.Equal(t, c.call, m.calls[1])
|
||||
require.Equal(t, runlockCalled, m.calls[2])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type mockROForkchoice struct {
|
||||
calls []mockCall
|
||||
}
|
||||
|
||||
var _ FastGetter = &mockROForkchoice{}
|
||||
|
||||
var _ RLocker = &mockROForkchoice{}
|
||||
|
||||
func (ro *mockROForkchoice) Lock() {
|
||||
ro.calls = append(ro.calls, lockCalled)
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) RLock() {
|
||||
ro.calls = append(ro.calls, rlockCalled)
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) Unlock() {
|
||||
ro.calls = append(ro.calls, unlockCalled)
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) RUnlock() {
|
||||
ro.calls = append(ro.calls, runlockCalled)
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HasNode(_ [32]byte) bool {
|
||||
ro.calls = append(ro.calls, hasNodeCalled)
|
||||
return false
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) ProposerBoost() [fieldparams.RootLength]byte {
|
||||
ro.calls = append(ro.calls, proposerBoostCalled)
|
||||
return [fieldparams.RootLength]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) IsCanonical(_ [32]byte) bool {
|
||||
ro.calls = append(ro.calls, isCanonicalCalled)
|
||||
return false
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
ro.calls = append(ro.calls, finalizedCheckpointCalled)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) IsViableForCheckpoint(_ *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
ro.calls = append(ro.calls, isViableForCheckpointCalled)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) FinalizedPayloadBlockHash() [32]byte {
|
||||
ro.calls = append(ro.calls, finalizedPayloadBlockHashCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
ro.calls = append(ro.calls, justifiedCheckpointCalled)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
ro.calls = append(ro.calls, previousJustifiedCheckpointCalled)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
ro.calls = append(ro.calls, justifiedPayloadBlockHashCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
ro.calls = append(ro.calls, unrealizedJustifiedPayloadBlockHashCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) NodeCount() int {
|
||||
ro.calls = append(ro.calls, nodeCountCalled)
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockSlotCalled)
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) Weight(_ [32]byte) (uint64, error) {
|
||||
ro.calls = append(ro.calls, weightCalled)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) IsOptimistic(_ [32]byte) (bool, error) {
|
||||
ro.calls = append(ro.calls, isOptimisticCalled)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) ShouldOverrideFCU() bool {
|
||||
ro.calls = append(ro.calls, shouldOverrideFCUCalled)
|
||||
return false
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
|
||||
ro.calls = append(ro.calls, slotCalled)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
|
||||
ro.calls = append(ro.calls, lastRootCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// TargetRootForEpoch implements FastGetter.
|
||||
func (ro *mockROForkchoice) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
ro.calls = append(ro.calls, targetRootForEpochCalled)
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
@@ -8,7 +8,6 @@ go_library(
|
||||
deps = [
|
||||
"//api/gateway:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
ethpbservice "github.com/prysmaticlabs/prysm/v4/proto/eth/service"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
@@ -51,10 +50,7 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
|
||||
}
|
||||
}
|
||||
if flags.EnableHTTPEthAPI(httpModules) {
|
||||
ethRegistrations := []gateway.PbHandlerRegistration{
|
||||
ethpbservice.RegisterBeaconChainHandler,
|
||||
ethpbservice.RegisterEventsHandler,
|
||||
}
|
||||
ethRegistrations := []gateway.PbHandlerRegistration{}
|
||||
ethMux := gwruntime.NewServeMux(
|
||||
gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, &gwruntime.HTTPBodyMarshaler{
|
||||
Marshaler: &gwruntime.JSONPb{
|
||||
|
||||
@@ -14,7 +14,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
assert.NotNil(t, cfg.EthPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, 2, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, 0, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
|
||||
})
|
||||
t.Run("Without Eth API", func(t *testing.T) {
|
||||
|
||||
@@ -41,7 +41,6 @@ go_library(
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/rpc:go_default_library",
|
||||
"//beacon-chain/rpc/apimiddleware:go_default_library",
|
||||
"//beacon-chain/slasher:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -43,7 +43,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
@@ -114,11 +113,12 @@ type BeaconNode struct {
|
||||
clockWaiter startup.ClockWaiter
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
blobRetentionEpochs primitives.Epoch
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
// every required service to the node.
|
||||
func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
|
||||
if err := configureTracing(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -156,9 +156,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
if err := configureExecutionSetting(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := kv.ConfigureBlobRetentionEpoch(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configureFastSSZHashingAlgorithm()
|
||||
|
||||
// Initializes any forks here.
|
||||
@@ -166,7 +163,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
|
||||
registry := runtime.NewServiceRegistry()
|
||||
|
||||
ctx, cancel := context.WithCancel(cliCtx.Context)
|
||||
ctx := cliCtx.Context
|
||||
beacon := &BeaconNode{
|
||||
cliCtx: cliCtx,
|
||||
ctx: ctx,
|
||||
@@ -381,7 +378,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
|
||||
|
||||
log.WithField("database-path", dbPath).Info("Checking DB")
|
||||
|
||||
d, err := db.NewDB(b.ctx, dbPath)
|
||||
d, err := kv.NewKVStore(b.ctx, dbPath, kv.WithBlobRetentionEpochs(b.blobRetentionEpochs))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -403,7 +400,8 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
|
||||
if err := d.ClearDB(); err != nil {
|
||||
return errors.Wrap(err, "could not clear database")
|
||||
}
|
||||
d, err = db.NewDB(b.ctx, dbPath)
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath, kv.WithBlobRetentionEpochs(b.blobRetentionEpochs))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create new database")
|
||||
}
|
||||
@@ -570,6 +568,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
|
||||
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
|
||||
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
|
||||
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
|
||||
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
|
||||
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
|
||||
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
|
||||
@@ -675,6 +674,7 @@ func (b *BeaconNode) registerPOWChainService() error {
|
||||
execution.WithStateGen(b.stateGen),
|
||||
execution.WithBeaconNodeStatsUpdater(bs),
|
||||
execution.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
|
||||
execution.WithJwtId(b.cliCtx.String(flags.JwtId.Name)),
|
||||
)
|
||||
web3Service, err := execution.NewService(b.ctx, opts...)
|
||||
if err != nil {
|
||||
@@ -846,6 +846,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
|
||||
ForkchoiceFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -937,9 +938,6 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
|
||||
apigateway.WithAllowedOrigins(allowedOrigins),
|
||||
apigateway.WithTimeout(uint64(timeout)),
|
||||
}
|
||||
if flags.EnableHTTPEthAPI(httpModules) {
|
||||
opts = append(opts, apigateway.WithApiMiddleware(&apimiddleware.BeaconEndpointFactory{}))
|
||||
}
|
||||
g, err := apigateway.New(b.ctx, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -33,6 +33,12 @@ import (
|
||||
// Ensure BeaconNode implements interfaces.
|
||||
var _ statefeed.Notifier = (*BeaconNode)(nil)
|
||||
|
||||
func newCliContextWithCancel(app *cli.App, set *flag.FlagSet) (*cli.Context, context.CancelFunc) {
|
||||
context, cancel := context.WithCancel(context.Background())
|
||||
parent := &cli.Context{Context: context}
|
||||
return cli.NewContext(app, set, parent), cancel
|
||||
}
|
||||
|
||||
// Test that beacon chain node can close.
|
||||
func TestNodeClose_OK(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
@@ -49,9 +55,9 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
|
||||
cmd.ValidatorMonitorIndicesFlag.Value = &cli.IntSlice{}
|
||||
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
|
||||
node, err := New(ctx)
|
||||
node, err := New(ctx, cancel)
|
||||
require.NoError(t, err)
|
||||
|
||||
node.Close()
|
||||
@@ -68,8 +74,8 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
|
||||
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
|
||||
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
node, err := New(ctx, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
@@ -81,7 +87,6 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
time.Sleep(3 * time.Second)
|
||||
node.Close()
|
||||
require.LogsContain(t, hook, "Starting beacon node")
|
||||
|
||||
}
|
||||
|
||||
func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
|
||||
@@ -117,8 +122,8 @@ func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.WriteFile("genesis_ssz.json", genesisBytes, 0666))
|
||||
set.String("genesis-state", "genesis_ssz.json", "")
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
node, err := New(ctx, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}))
|
||||
require.NoError(t, err)
|
||||
@@ -149,12 +154,12 @@ func TestClearDB(t *testing.T) {
|
||||
set.Bool(cmd.ForceClearDB.Name, true, "force clear db")
|
||||
set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
|
||||
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
|
||||
context := cli.NewContext(&app, set, nil)
|
||||
context, cancel := newCliContextWithCancel(&app, set)
|
||||
options := []Option{
|
||||
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
}
|
||||
_, err = New(context, options...)
|
||||
_, err = New(context, cancel, options...)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Removing database")
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// Option for beacon node configuration.
|
||||
@@ -41,3 +42,11 @@ func WithBlobStorage(bs *filesystem.BlobStorage) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
|
||||
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.blobRetentionEpochs = e
|
||||
return nil
|
||||
}
|
||||
}
|
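The new option composes with the existing functional options; a test-style sketch mirroring the TestNodeStart_Ok changes above (the retention window of 4096 epochs is illustrative only, not a recommended default):

ctx, cancel := newCliContextWithCancel(&app, set)
node, err := New(ctx, cancel,
	WithBlobRetentionEpochs(4096), // illustrative retention window
	WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
)
require.NoError(t, err)
node.Close()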
||||
|
||||
@@ -121,15 +121,11 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
|
||||
defer c.unAggregateAttLock.Unlock()
|
||||
|
||||
count := 0
|
||||
for _, att := range c.unAggregatedAtt {
|
||||
for r, att := range c.unAggregatedAtt {
|
||||
if att == nil || helpers.IsAggregated(att) {
|
||||
continue
|
||||
}
|
||||
if seen, err := c.hasSeenBit(att); err == nil && seen {
|
||||
r, err := hashFn(att)
|
||||
if err != nil {
|
||||
return count, errors.Wrap(err, "could not tree hash attestation")
|
||||
}
|
||||
delete(c.unAggregatedAtt, r)
|
||||
count++
|
||||
}
|
||||
|
||||
@@ -6,6 +6,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
)
|
||||
|
||||
// defaultPubsubQueueSize is the queue size used when the configured value is invalid.
|
||||
const defaultPubsubQueueSize = 600
|
||||
|
||||
// Config for the p2p service. These parameters are set from application level flags
|
||||
// to initialize the p2p service.
|
||||
type Config struct {
|
||||
@@ -25,9 +28,20 @@ type Config struct {
|
||||
TCPPort uint
|
||||
UDPPort uint
|
||||
MaxPeers uint
|
||||
QueueSize uint
|
||||
AllowListCIDR string
|
||||
DenyListCIDR []string
|
||||
StateNotifier statefeed.Notifier
|
||||
DB db.ReadOnlyDatabase
|
||||
ClockWaiter startup.ClockWaiter
|
||||
}
|
||||
|
||||
// validateConfig validates whether the values provided are accurate and will set
|
||||
// the appropriate values for those that are invalid.
|
||||
func validateConfig(cfg *Config) *Config {
|
||||
if cfg.QueueSize == 0 {
|
||||
log.Warnf("Invalid pubsub queue size of %d initialized, setting the queue size to %d instead", cfg.QueueSize, defaultPubsubQueueSize)
|
||||
cfg.QueueSize = defaultPubsubQueueSize
|
||||
}
|
||||
return cfg
|
||||
}
|
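With this defaulting in place, NewService can take the flag value as-is; a short sketch of the effective behavior (validateConfig is called from NewService, per the hunk further below):

// A zero QueueSize (flag unset or invalid) is replaced by the default of 600.
cfg := &Config{QueueSize: 0}
cfg = validateConfig(cfg)
// cfg.QueueSize == defaultPubsubQueueSize here; it is later handed to
// pubsub.WithPeerOutboundQueueSize and pubsub.WithValidateQueueSize.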
||||
|
||||
@@ -21,7 +21,7 @@ const (
|
||||
|
||||
// High watermark buffer signifies the buffer till which
|
||||
// we will handle inbound requests.
|
||||
highWatermarkBuffer = 10
|
||||
highWatermarkBuffer = 20
|
||||
)
|
||||
|
||||
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
|
||||
|
||||
@@ -113,7 +113,7 @@ var (
|
||||
Name: "p2p_pubsub_reject_total",
|
||||
Help: "The number of messages rejected of a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
[]string{"topic", "reason"})
|
||||
pubsubPeerThrottle = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_throttle_total",
|
||||
Help: "The number of times a peer has been throttled for a particular topic",
|
||||
@@ -121,31 +121,46 @@ var (
|
||||
[]string{"topic"})
|
||||
pubsubRPCRecv = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_recv_total",
|
||||
Help: "The number of messages received via rpc for a particular topic",
|
||||
Help: "The number of messages received via rpc for a particular control message",
|
||||
},
|
||||
[]string{"control_message"})
|
||||
pubsubRPCSubRecv = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_recv_sub_total",
|
||||
Help: "The number of subscription messages received via rpc",
|
||||
})
|
||||
pubsubRPCPubRecv = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_recv_pub_total",
|
||||
Help: "The number of publish messages received via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_total",
|
||||
Help: "The number of messages dropped via rpc for a particular topic",
|
||||
Help: "The number of messages dropped via rpc for a particular control message",
|
||||
},
|
||||
[]string{"control_message"})
|
||||
pubsubRPCSubDrop = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_sub_total",
|
||||
Help: "The number of subscription messages dropped via rpc",
|
||||
})
|
||||
pubsubRPCPubDrop = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_pub_total",
|
||||
Help: "The number of publish messages dropped via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_sent_total",
|
||||
Help: "The number of messages sent via rpc for a particular topic",
|
||||
Help: "The number of messages sent via rpc for a particular control message",
|
||||
},
|
||||
[]string{"control_message"})
|
||||
pubsubRPCSubSent = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_sent_sub_total",
|
||||
Help: "The number of subscription messages sent via rpc",
|
||||
})
|
||||
pubsubRPCPubSent = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_sent_pub_total",
|
||||
Help: "The number of publish messages sent via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
@@ -139,9 +139,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
return MsgID(s.genesisValidatorsRoot, pmsg)
|
||||
}),
|
||||
pubsub.WithSubscriptionFilter(s),
|
||||
pubsub.WithPeerOutboundQueueSize(pubsubQueueSize),
|
||||
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
|
||||
pubsub.WithMaxMessageSize(int(params.BeaconNetworkConfig().GossipMaxSizeBellatrix)),
|
||||
pubsub.WithValidateQueueSize(pubsubQueueSize),
|
||||
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),
|
||||
pubsub.WithPeerScore(peerScoringParams()),
|
||||
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
|
||||
pubsub.WithGossipSubParams(pubsubGossipParam()),
|
||||
|
||||
@@ -10,6 +10,15 @@ import (
|
||||
|
||||
var _ = pubsub.RawTracer(gossipTracer{})
|
||||
|
||||
// action identifies the kind of pubsub RPC operation (receive, send, or drop) being recorded.
|
||||
type action int
|
||||
|
||||
const (
|
||||
recv action = iota
|
||||
send
|
||||
drop
|
||||
)
|
||||
|
||||
// This tracer is used to implement metrics collection for messages received
|
||||
// and broadcasted through gossipsub.
|
||||
type gossipTracer struct {
|
||||
@@ -58,7 +67,7 @@ func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
|
||||
|
||||
// RejectMessage .
|
||||
func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
|
||||
pubsubMessageReject.WithLabelValues(*msg.Topic).Inc()
|
||||
pubsubMessageReject.WithLabelValues(*msg.Topic, reason).Inc()
|
||||
}
|
||||
|
||||
// DuplicateMessage .
|
||||
@@ -79,25 +88,33 @@ func (g gossipTracer) ThrottlePeer(p peer.ID) {
|
||||
|
||||
// RecvRPC .
|
||||
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
|
||||
setMetricFromRPC(pubsubRPCSubRecv, pubsubRPCRecv, rpc)
|
||||
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
|
||||
}
|
||||
|
||||
// SendRPC .
|
||||
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
setMetricFromRPC(pubsubRPCSubSent, pubsubRPCSent, rpc)
|
||||
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
|
||||
}
|
||||
|
||||
// DropRPC .
|
||||
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
setMetricFromRPC(pubsubRPCSubDrop, pubsubRPCDrop, rpc)
|
||||
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
|
||||
}
|
||||
|
||||
func setMetricFromRPC(ctr prometheus.Counter, gauge *prometheus.CounterVec, rpc *pubsub.RPC) {
|
||||
ctr.Add(float64(len(rpc.Subscriptions)))
|
||||
func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
|
||||
subCtr.Add(float64(len(rpc.Subscriptions)))
|
||||
if rpc.Control != nil {
|
||||
gauge.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
|
||||
gauge.WithLabelValues("prune").Add(float64(len(rpc.Control.Prune)))
|
||||
gauge.WithLabelValues("ihave").Add(float64(len(rpc.Control.Ihave)))
|
||||
gauge.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
|
||||
ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
|
||||
ctrlCtr.WithLabelValues("prune").Add(float64(len(rpc.Control.Prune)))
|
||||
ctrlCtr.WithLabelValues("ihave").Add(float64(len(rpc.Control.Ihave)))
|
||||
ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
|
||||
}
|
||||
for _, msg := range rpc.Publish {
|
||||
// For incoming messages from pubsub, we do not record metrics for them as these values
|
||||
// could be junk.
|
||||
if act == recv {
|
||||
continue
|
||||
}
|
||||
pubCtr.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,10 +48,6 @@ var refreshRate = slots.DivideSlotBy(2)
|
||||
// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
|
||||
const maxBadResponses = 5
|
||||
|
||||
// pubsubQueueSize is the size that we assign to our validation queue and outbound message queue for
|
||||
// gossipsub.
|
||||
const pubsubQueueSize = 600
|
||||
|
||||
// maxDialTimeout is the timeout for a single peer dial.
|
||||
var maxDialTimeout = params.BeaconNetworkConfig().RespTimeout
|
||||
|
||||
@@ -69,7 +65,7 @@ type Service struct {
|
||||
metaData metadata.Metadata
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
joinedTopicsLock sync.Mutex
|
||||
joinedTopicsLock sync.RWMutex
|
||||
subnetsLock map[uint64]*sync.RWMutex
|
||||
subnetsLockLock sync.Mutex // Lock access to subnetsLock
|
||||
initializationLock sync.Mutex
|
||||
@@ -98,6 +94,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
}
|
||||
|
||||
s.cfg = validateConfig(s.cfg)
|
||||
|
||||
dv5Nodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr)
|
||||
|
||||
cfg.Discv5BootStrapAddr = dv5Nodes
|
||||
|
||||
@@ -36,6 +36,7 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/rewards:go_default_library",
|
||||
"//beacon-chain/rpc/eth/validator:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/node:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
|
||||
@@ -50,7 +51,6 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//io/logs:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_go_grpc_middleware//:go_default_library",
|
||||
@@ -75,6 +75,18 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/eth/blob:go_default_library",
|
||||
"//beacon-chain/rpc/eth/builder:go_default_library",
|
||||
"//beacon-chain/rpc/eth/debug:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/light-client:go_default_library",
|
||||
"//beacon-chain/rpc/eth/node:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards:go_default_library",
|
||||
"//beacon-chain/rpc/eth/validator:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/node:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/validator:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"custom_handlers.go",
|
||||
"custom_hooks.go",
|
||||
"endpoint_factory.go",
|
||||
"structs.go",
|
||||
"structs_marshalling.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_r3labs_sse_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"custom_handlers_test.go",
|
||||
"custom_hooks_test.go",
|
||||
"endpoint_factory_test.go",
|
||||
"structs_marshalling_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_r3labs_sse_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,173 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/r3labs/sse/v2"
|
||||
)
|
||||
|
||||
func handleEvents(m *apimiddleware.ApiProxyMiddleware, _ apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
sseClient := sse.NewClient("http://" + m.GatewayAddress + "/internal" + req.URL.RequestURI())
|
||||
sseClient.Headers["Grpc-Timeout"] = "0S"
|
||||
eventChan := make(chan *sse.Event)
|
||||
|
||||
// We use grpc-gateway as the server side of events, not the sse library.
|
||||
// Because of this subscribing to streams doesn't work as intended, resulting in each event being handled by all subscriptions.
|
||||
// To handle events properly, we subscribe just once using a placeholder value ('events') and handle all topics inside this subscription.
|
||||
if err := sseClient.SubscribeChan("events", eventChan); err != nil {
|
||||
apimiddleware.WriteError(w, apimiddleware.InternalServerError(err), nil)
|
||||
sseClient.Unsubscribe(eventChan)
|
||||
return
|
||||
}
|
||||
|
||||
errJson := receiveEvents(eventChan, w, req)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
}
|
||||
|
||||
sseClient.Unsubscribe(eventChan)
|
||||
return true
|
||||
}
|
||||
|
||||
type dataSubset struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
func receiveEvents(eventChan <-chan *sse.Event, w http.ResponseWriter, req *http.Request) apimiddleware.ErrorJson {
|
||||
for {
|
||||
select {
|
||||
case msg := <-eventChan:
|
||||
var data interface{}
|
||||
|
||||
// The message's event comes to us with trailing whitespace. Remove it here for
|
||||
// ease of future processing.
|
||||
msg.Event = bytes.TrimSpace(msg.Event)
|
||||
|
||||
switch string(msg.Event) {
|
||||
case events.HeadTopic:
|
||||
data = &EventHeadJson{}
|
||||
case events.BlockTopic:
|
||||
data = &ReceivedBlockDataJson{}
|
||||
case events.AttestationTopic:
|
||||
data = &AttestationJson{}
|
||||
|
||||
// Data received in the aggregated att event does not fit the expected event stream output.
|
||||
// We extract the underlying attestation from event data
|
||||
// and assign the attestation back to event data for further processing.
|
||||
aggEventData := &AggregatedAttReceivedDataJson{}
|
||||
if err := json.Unmarshal(msg.Data, aggEventData); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
var attData []byte
|
||||
var err error
|
||||
// If true, then we have an unaggregated attestation
|
||||
if aggEventData.Aggregate == nil {
|
||||
unaggEventData := &UnaggregatedAttReceivedDataJson{}
|
||||
if err := json.Unmarshal(msg.Data, unaggEventData); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
attData, err = json.Marshal(unaggEventData)
|
||||
if err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
} else {
|
||||
attData, err = json.Marshal(aggEventData.Aggregate)
|
||||
if err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
}
|
||||
msg.Data = attData
|
||||
case events.VoluntaryExitTopic:
|
||||
data = &SignedVoluntaryExitJson{}
|
||||
case events.FinalizedCheckpointTopic:
|
||||
data = &EventFinalizedCheckpointJson{}
|
||||
case events.ChainReorgTopic:
|
||||
data = &EventChainReorgJson{}
|
||||
case events.SyncCommitteeContributionTopic:
|
||||
data = &SignedContributionAndProofJson{}
|
||||
case events.BLSToExecutionChangeTopic:
|
||||
data = &SignedBLSToExecutionChangeJson{}
|
||||
case events.PayloadAttributesTopic:
|
||||
dataSubset := &dataSubset{}
|
||||
if err := json.Unmarshal(msg.Data, dataSubset); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
switch dataSubset.Version {
|
||||
case version.String(version.Capella):
|
||||
data = &EventPayloadAttributeStreamV2Json{}
|
||||
case version.String(version.Bellatrix):
|
||||
data = &EventPayloadAttributeStreamV1Json{}
|
||||
default:
|
||||
return apimiddleware.InternalServerError(errors.New("payload version unsupported"))
|
||||
}
|
||||
case events.BlobSidecarTopic:
|
||||
data = &EventBlobSidecarJson{}
|
||||
case "error":
|
||||
data = &EventErrorJson{}
|
||||
default:
|
||||
return &apimiddleware.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("Event type '%s' not supported", string(msg.Event)),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
|
||||
if errJson := writeEvent(msg, w, data); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
if errJson := flushEvent(w); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
case <-req.Context().Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func writeEvent(msg *sse.Event, w http.ResponseWriter, data interface{}) apimiddleware.ErrorJson {
|
||||
if err := json.Unmarshal(msg.Data, data); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
if errJson := apimiddleware.ProcessMiddlewareResponseFields(data); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
dataJson, errJson := apimiddleware.SerializeMiddlewareResponseIntoJson(data)
|
||||
if errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
|
||||
if _, err := w.Write([]byte("event: ")); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
if _, err := w.Write(msg.Event); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
if _, err := w.Write([]byte("\ndata: ")); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
if _, err := w.Write(dataJson); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
if _, err := w.Write([]byte("\n\n")); err != nil {
|
||||
return apimiddleware.InternalServerError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func flushEvent(w http.ResponseWriter) apimiddleware.ErrorJson {
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
return &apimiddleware.DefaultErrorJson{Message: fmt.Sprintf("Flush not supported in %T", w), Code: http.StatusInternalServerError}
|
||||
}
|
||||
flusher.Flush()
|
||||
return nil
|
||||
}
|
||||
@@ -1,212 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/r3labs/sse/v2"
|
||||
)
|
||||
|
||||
func TestReceiveEvents(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ch := make(chan *sse.Event)
|
||||
w := httptest.NewRecorder()
|
||||
w.Body = &bytes.Buffer{}
|
||||
req := httptest.NewRequest("GET", "http://foo.example", &bytes.Buffer{})
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
go func() {
|
||||
base64Val := "Zm9v"
|
||||
data := &EventFinalizedCheckpointJson{
|
||||
Block: base64Val,
|
||||
State: base64Val,
|
||||
Epoch: "1",
|
||||
}
|
||||
bData, err := json.Marshal(data)
|
||||
require.NoError(t, err)
|
||||
msg := &sse.Event{
|
||||
Data: bData,
|
||||
Event: []byte(events.FinalizedCheckpointTopic),
|
||||
}
|
||||
ch <- msg
|
||||
time.Sleep(time.Second)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
errJson := receiveEvents(ch, w, req)
|
||||
assert.Equal(t, true, errJson == nil)
|
||||
|
||||
expectedEvent := `event: finalized_checkpoint
|
||||
data: {"block":"0x666f6f","state":"0x666f6f","epoch":"1","execution_optimistic":false}
|
||||
|
||||
`
|
||||
assert.DeepEqual(t, expectedEvent, w.Body.String())
|
||||
}
|
||||
|
||||
func TestReceiveEvents_AggregatedAtt(t *testing.T) {
|
||||
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan *sse.Event)
	w := httptest.NewRecorder()
	w.Body = &bytes.Buffer{}
	req := httptest.NewRequest("GET", "http://foo.example", &bytes.Buffer{})
	req = req.WithContext(ctx)

	go func() {
		base64Val := "Zm9v"
		data := AggregatedAttReceivedDataJson{
			Aggregate: &AttestationJson{
				AggregationBits: base64Val,
				Data: &AttestationDataJson{
					Slot:            "1",
					CommitteeIndex:  "1",
					BeaconBlockRoot: base64Val,
					Source:          nil,
					Target:          nil,
				},
				Signature: base64Val,
			},
		}
		bData, err := json.Marshal(data)
		require.NoError(t, err)
		msg := &sse.Event{
			Data:  bData,
			Event: []byte(events.AttestationTopic),
		}
		ch <- msg
		time.Sleep(time.Second)
		cancel()
	}()

	errJson := receiveEvents(ch, w, req)
	assert.Equal(t, true, errJson == nil)

	expectedEvent := `event: attestation
data: {"aggregation_bits":"0x666f6f","data":{"slot":"1","index":"1","beacon_block_root":"0x666f6f","source":null,"target":null},"signature":"0x666f6f"}

`
	assert.DeepEqual(t, expectedEvent, w.Body.String())
}

func TestReceiveEvents_UnaggregatedAtt(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan *sse.Event)
	w := httptest.NewRecorder()
	w.Body = &bytes.Buffer{}
	req := httptest.NewRequest("GET", "http://foo.example", &bytes.Buffer{})
	req = req.WithContext(ctx)

	go func() {
		base64Val := "Zm9v"
		data := UnaggregatedAttReceivedDataJson{
			AggregationBits: base64Val,
			Data: &AttestationDataJson{
				Slot:            "1",
				CommitteeIndex:  "1",
				BeaconBlockRoot: base64Val,
				Source:          nil,
				Target:          nil,
			},
			Signature: base64Val,
		}
		bData, err := json.Marshal(data)
		require.NoError(t, err)
		msg := &sse.Event{
			Data:  bData,
			Event: []byte(events.AttestationTopic),
		}
		ch <- msg
		time.Sleep(time.Second)
		cancel()
	}()

	errJson := receiveEvents(ch, w, req)
	assert.Equal(t, true, errJson == nil)

	expectedEvent := `event: attestation
data: {"aggregation_bits":"0x666f6f","data":{"slot":"1","index":"1","beacon_block_root":"0x666f6f","source":null,"target":null},"signature":"0x666f6f"}

`
	assert.DeepEqual(t, expectedEvent, w.Body.String())
}

func TestReceiveEvents_EventNotSupported(t *testing.T) {
	ch := make(chan *sse.Event)
	w := httptest.NewRecorder()
	w.Body = &bytes.Buffer{}
	req := httptest.NewRequest("GET", "http://foo.example", &bytes.Buffer{})

	go func() {
		msg := &sse.Event{
			Data:  []byte("foo"),
			Event: []byte("not_supported"),
		}
		ch <- msg
	}()

	errJson := receiveEvents(ch, w, req)
	require.NotNil(t, errJson)
	assert.Equal(t, "Event type 'not_supported' not supported", errJson.Msg())
}

func TestReceiveEvents_TrailingSpace(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan *sse.Event)
	w := httptest.NewRecorder()
	w.Body = &bytes.Buffer{}
	req := httptest.NewRequest("GET", "http://foo.example", &bytes.Buffer{})
	req = req.WithContext(ctx)

	go func() {
		base64Val := "Zm9v"
		data := &EventFinalizedCheckpointJson{
			Block: base64Val,
			State: base64Val,
			Epoch: "1",
		}
		bData, err := json.Marshal(data)
		require.NoError(t, err)
		msg := &sse.Event{
			Data:  bData,
			Event: []byte("finalized_checkpoint "),
		}
		ch <- msg
		time.Sleep(time.Second)
		cancel()
	}()

	errJson := receiveEvents(ch, w, req)
	assert.Equal(t, true, errJson == nil)
	assert.Equal(t, `event: finalized_checkpoint
data: {"block":"0x666f6f","state":"0x666f6f","epoch":"1","execution_optimistic":false}

`, w.Body.String())
}

func TestWriteEvent(t *testing.T) {
	base64Val := "Zm9v"
	data := &EventFinalizedCheckpointJson{
		Block: base64Val,
		State: base64Val,
		Epoch: "1",
	}
	bData, err := json.Marshal(data)
	require.NoError(t, err)
	msg := &sse.Event{
		Data:  bData,
		Event: []byte("test_event"),
	}
	w := httptest.NewRecorder()
	w.Body = &bytes.Buffer{}

	errJson := writeEvent(msg, w, &EventFinalizedCheckpointJson{})
	require.Equal(t, true, errJson == nil)
	written := w.Body.String()
	assert.Equal(t, "event: test_event\ndata: {\"block\":\"0x666f6f\",\"state\":\"0x666f6f\",\"epoch\":\"1\",\"execution_optimistic\":false}\n\n", written)
}
@@ -1,277 +0,0 @@
package apimiddleware

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"strconv"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolBLSToExecutionChange
// expects posting a top-level array. We make it more proto-friendly by wrapping it in a struct.

type v1alpha1SignedPhase0Block struct {
	Block     *BeaconBlockJson `json:"block"` // tech debt on phase 0 called this block instead of "message"
	Signature string           `json:"signature" hex:"true"`
}

type phase0PublishBlockRequestJson struct {
	Message *v1alpha1SignedPhase0Block `json:"phase0_block"`
}

type altairPublishBlockRequestJson struct {
	AltairBlock *SignedBeaconBlockAltairJson `json:"altair_block"`
}

type bellatrixPublishBlockRequestJson struct {
	BellatrixBlock *SignedBeaconBlockBellatrixJson `json:"bellatrix_block"`
}

type bellatrixPublishBlindedBlockRequestJson struct {
	BellatrixBlock *SignedBlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
}

type capellaPublishBlockRequestJson struct {
	CapellaBlock *SignedBeaconBlockCapellaJson `json:"capella_block"`
}

type capellaPublishBlindedBlockRequestJson struct {
	CapellaBlock *SignedBlindedBeaconBlockCapellaJson `json:"capella_block"`
}

type denebPublishBlockRequestJson struct {
	DenebContents *SignedBeaconBlockContentsDenebJson `json:"deneb_contents"`
}

type denebPublishBlindedBlockRequestJson struct {
	DenebContents *SignedBlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
}

// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
// We don't know which version of the block got posted, but we can determine it from the slot.
// We know that blocks of all versions have a Message field with a Slot field,
// so we deserialize the request into a struct s, which has the right fields, to obtain the slot.
// Once we know the slot, we can determine what the PostRequest field of the endpoint should be, and we set it appropriately.
func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
	_ http.ResponseWriter,
	req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
	s := struct {
		Slot string
	}{}

	buf, err := io.ReadAll(req.Body)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
	}

	typeParseMap := make(map[string]json.RawMessage)
	if err := json.Unmarshal(buf, &typeParseMap); err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not parse object")
	}
	if val, ok := typeParseMap["message"]; ok {
		if err := json.Unmarshal(val, &s); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'message' ")
		}
	} else if val, ok := typeParseMap["signed_block"]; ok {
		temp := struct {
			Message struct {
				Slot string
			}
		}{}
		if err := json.Unmarshal(val, &temp); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'signed_block' ")
		}
		s.Slot = temp.Message.Slot
	} else {
		return false, &apimiddleware.DefaultErrorJson{Message: "could not parse slot from request", Code: http.StatusInternalServerError}
	}
	slot, err := strconv.ParseUint(s.Slot, 10, 64)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
	}
	currentEpoch := slots.ToEpoch(primitives.Slot(slot))
	if currentEpoch < params.BeaconConfig().AltairForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockJson{}
	} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockAltairJson{}
	} else if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockBellatrixJson{}
	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockCapellaJson{}
	} else {
		endpoint.PostRequest = &SignedBeaconBlockContentsDenebJson{}
	}
	req.Body = io.NopCloser(bytes.NewBuffer(buf))
	return true, nil
}
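As a rough standalone sketch, the slot probing above reduces to the following helper: pre-Deneb publish bodies carry a top-level "message" object, while Deneb block contents wrap it in "signed_block". The JSON key names mirror the handler; the helper name extractSlot and the example bodies are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
)

// extractSlot peeks at the raw JSON to find the slot without committing to a block version,
// mirroring the probing done in setInitialPublishBlockPostRequest.
func extractSlot(body []byte) (string, error) {
	raw := make(map[string]json.RawMessage)
	if err := json.Unmarshal(body, &raw); err != nil {
		return "", err
	}
	if msg, ok := raw["message"]; ok {
		// Phase 0 through Capella: {"message": {"slot": ...}, "signature": ...}
		s := struct{ Slot string }{}
		if err := json.Unmarshal(msg, &s); err != nil {
			return "", err
		}
		return s.Slot, nil
	}
	if sb, ok := raw["signed_block"]; ok {
		// Deneb block contents: {"signed_block": {"message": {"slot": ...}}, "signed_blob_sidecars": [...]}
		s := struct{ Message struct{ Slot string } }{}
		if err := json.Unmarshal(sb, &s); err != nil {
			return "", err
		}
		return s.Message.Slot, nil
	}
	return "", fmt.Errorf("could not parse slot from request")
}

func main() {
	preDeneb := []byte(`{"message":{"slot":"123"},"signature":"0x00"}`)
	deneb := []byte(`{"signed_block":{"message":{"slot":"456"}},"signed_blob_sidecars":[]}`)
	s1, _ := extractSlot(preDeneb)
	s2, _ := extractSlot(deneb)
	fmt.Println(s1, s2) // 123 456
}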

// In preparePublishedBlock we transform the PostRequest.
// gRPC expects an XXX_block field in the JSON object, but we have a message field at this point.
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		endpoint.PostRequest = &phase0PublishBlockRequestJson{
			Message: &v1alpha1SignedPhase0Block{
				Block:     block.Message,
				Signature: block.Signature,
			},
		}
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		endpoint.PostRequest = &altairPublishBlockRequestJson{
			AltairBlock: block,
		}
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockBellatrixJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		endpoint.PostRequest = &bellatrixPublishBlockRequestJson{
			BellatrixBlock: block,
		}
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockCapellaJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		endpoint.PostRequest = &capellaPublishBlockRequestJson{
			CapellaBlock: block,
		}
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockContentsDenebJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		endpoint.PostRequest = &denebPublishBlockRequestJson{
			DenebContents: block,
		}
		return nil
	}
	return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}
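For example, an Altair body of the form {"message": {...}, "signature": "0x..."} is rewrapped as {"altair_block": {"message": {...}, "signature": "0x..."}}, while a phase 0 body becomes {"phase0_block": {"block": {...}, "signature": "0x..."}}, since the v1alpha1 phase 0 container names its inner field "block" rather than "message" (placeholder values shown for illustration).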

// setInitialPublishBlindedBlockPostRequest is triggered before we deserialize the request JSON into a struct.
// We don't know which version of the block got posted, but we can determine it from the slot.
// We know that blocks of all versions have a Message field with a Slot field,
// so we deserialize the request into a struct s, which has the right fields, to obtain the slot.
// Once we know the slot, we can determine what the PostRequest field of the endpoint should be, and we set it appropriately.
func setInitialPublishBlindedBlockPostRequest(endpoint *apimiddleware.Endpoint,
	_ http.ResponseWriter,
	req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
	s := struct {
		Slot string
	}{}

	buf, err := io.ReadAll(req.Body)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
	}

	typeParseMap := make(map[string]json.RawMessage)
	if err = json.Unmarshal(buf, &typeParseMap); err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not parse object")
	}
	if val, ok := typeParseMap["message"]; ok {
		if err = json.Unmarshal(val, &s); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'message' ")
		}
	} else if val, ok = typeParseMap["signed_blinded_block"]; ok {
		temp := struct {
			Message struct {
				Slot string
			}
		}{}
		if err = json.Unmarshal(val, &temp); err != nil {
			return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal field 'signed_block' ")
		}
		s.Slot = temp.Message.Slot
	} else {
		return false, &apimiddleware.DefaultErrorJson{Message: "could not parse slot from request", Code: http.StatusInternalServerError}
	}
	slot, err := strconv.ParseUint(s.Slot, 10, 64)
	if err != nil {
		return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
	}
	currentEpoch := slots.ToEpoch(primitives.Slot(slot))
	if currentEpoch < params.BeaconConfig().AltairForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockJson{}
	} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
		endpoint.PostRequest = &SignedBeaconBlockAltairJson{}
	} else if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
		endpoint.PostRequest = &SignedBlindedBeaconBlockBellatrixJson{}
	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
		endpoint.PostRequest = &SignedBlindedBeaconBlockCapellaJson{}
	} else {
		endpoint.PostRequest = &SignedBlindedBeaconBlockContentsDenebJson{}
	}
	req.Body = io.NopCloser(bytes.NewBuffer(buf))
	return true, nil
}

// In preparePublishedBlindedBlock we transform the PostRequest.
// gRPC expects either an XXX_block field in the JSON object, but we have a message field at this point.
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockJson); ok {
		endpoint.PostRequest = &phase0PublishBlockRequestJson{
			Message: &v1alpha1SignedPhase0Block{
				Block:     block.Message,
				Signature: block.Signature,
			},
		}
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBeaconBlockAltairJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &altairPublishBlockRequestJson{
			AltairBlock: block,
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockBellatrixJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &bellatrixPublishBlindedBlockRequestJson{
			BellatrixBlock: &SignedBlindedBeaconBlockBellatrixJson{
				Message:   block.Message,
				Signature: block.Signature,
			},
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if block, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockCapellaJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &capellaPublishBlindedBlockRequestJson{
			CapellaBlock: &SignedBlindedBeaconBlockCapellaJson{
				Message:   block.Message,
				Signature: block.Signature,
			},
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	if blockContents, ok := endpoint.PostRequest.(*SignedBlindedBeaconBlockContentsDenebJson); ok {
		// Prepare post request that can be properly decoded on gRPC side.
		actualPostReq := &denebPublishBlindedBlockRequestJson{
			DenebContents: &SignedBlindedBeaconBlockContentsDenebJson{
				SignedBlindedBlock:        blockContents.SignedBlindedBlock,
				SignedBlindedBlobSidecars: blockContents.SignedBlindedBlobSidecars,
			},
		}
		endpoint.PostRequest = actualPostReq
		return nil
	}
	return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}
@@ -1,359 +0,0 @@
package apimiddleware

import (
	"bytes"
	"encoding/json"
	"net/http/httptest"
	"reflect"
	"strconv"
	"strings"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

func TestSetInitialPublishBlockPostRequest(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
	cfg.CapellaForkEpoch = params.BeaconConfig().BellatrixForkEpoch + 1
	cfg.DenebForkEpoch = params.BeaconConfig().CapellaForkEpoch + 1
	params.OverrideBeaconConfig(cfg)

	endpoint := &apimiddleware.Endpoint{}
	s := struct {
		Message struct {
			Slot string
		} `json:"message"`
	}{}
	t.Run("Phase 0", func(t *testing.T) {
		s.Message = struct{ Slot string }{Slot: "0"}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Altair", func(t *testing.T) {
		slot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
		require.NoError(t, err)
		s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockAltairJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Bellatrix", func(t *testing.T) {
		slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
		require.NoError(t, err)
		s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockBellatrixJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Capella", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig()
		cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch.Add(2)
		params.OverrideBeaconConfig(cfg)

		slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
		require.NoError(t, err)
		s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockCapellaJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Deneb", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig()
		cfg.DenebForkEpoch = cfg.CapellaForkEpoch.Add(2)
		params.OverrideBeaconConfig(cfg)
		slot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
		require.NoError(t, err)
		denebS := struct {
			SignedBlock struct {
				Message struct {
					Slot string
				} `json:"message"`
			} `json:"signed_block"`
		}{}
		denebS.SignedBlock = struct {
			Message struct {
				Slot string
			} `json:"message"`
		}{
			Message: struct {
				Slot string
			}{Slot: strconv.FormatUint(uint64(slot), 10)},
		}
		j, err := json.Marshal(denebS)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockContentsDenebJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
}

func TestPreparePublishedBlock(t *testing.T) {
	t.Run("Phase 0", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBeaconBlockJson{
				Message: &BeaconBlockJson{
					Body: &BeaconBlockBodyJson{},
				},
			},
		}
		errJson := preparePublishedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*phase0PublishBlockRequestJson)
		assert.Equal(t, true, ok)
	})

	t.Run("Altair", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBeaconBlockAltairJson{
				Message: &BeaconBlockAltairJson{
					Body: &BeaconBlockBodyAltairJson{},
				},
			},
		}
		errJson := preparePublishedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*altairPublishBlockRequestJson)
		assert.Equal(t, true, ok)
	})

	t.Run("Bellatrix", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBeaconBlockBellatrixJson{
				Message: &BeaconBlockBellatrixJson{
					Body: &BeaconBlockBodyBellatrixJson{},
				},
			},
		}
		errJson := preparePublishedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*bellatrixPublishBlockRequestJson)
		assert.Equal(t, true, ok)
	})

	t.Run("unsupported block type", func(t *testing.T) {
		errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
		assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
	})
}

func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
	cfg.CapellaForkEpoch = params.BeaconConfig().BellatrixForkEpoch + 1
	cfg.DenebForkEpoch = params.BeaconConfig().CapellaForkEpoch + 1
	params.OverrideBeaconConfig(cfg)

	endpoint := &apimiddleware.Endpoint{}
	s := struct {
		Message struct {
			Slot string
		} `json:"message"`
	}{}
	t.Run("Phase 0", func(t *testing.T) {
		s.Message = struct{ Slot string }{Slot: "0"}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Altair", func(t *testing.T) {
		slot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
		require.NoError(t, err)
		s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBeaconBlockAltairJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Bellatrix", func(t *testing.T) {
		slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
		require.NoError(t, err)
		s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockBellatrixJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Capella", func(t *testing.T) {
		slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
		require.NoError(t, err)
		s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
		j, err := json.Marshal(s)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockCapellaJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
	t.Run("Deneb", func(t *testing.T) {
		slot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
		require.NoError(t, err)
		denebS := struct {
			SignedBlindedBlock struct {
				Message struct {
					Slot string
				} `json:"message"`
			} `json:"signed_blinded_block"`
		}{}
		denebS.SignedBlindedBlock = struct {
			Message struct {
				Slot string
			} `json:"message"`
		}{
			Message: struct {
				Slot string
			}{Slot: strconv.FormatUint(uint64(slot), 10)},
		}
		j, err := json.Marshal(denebS)
		require.NoError(t, err)
		var body bytes.Buffer
		_, err = body.Write(j)
		require.NoError(t, err)
		request := httptest.NewRequest("POST", "http://foo.example", &body)
		runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
		require.Equal(t, true, errJson == nil)
		assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
		assert.Equal(t, reflect.TypeOf(SignedBlindedBeaconBlockContentsDenebJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
	})
}

func TestPreparePublishedBlindedBlock(t *testing.T) {
	t.Run("Phase 0", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBeaconBlockJson{
				Message: &BeaconBlockJson{
					Body: &BeaconBlockBodyJson{},
				},
			},
		}
		errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*phase0PublishBlockRequestJson)
		assert.Equal(t, true, ok)
	})

	t.Run("Altair", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBeaconBlockAltairJson{
				Message: &BeaconBlockAltairJson{
					Body: &BeaconBlockBodyAltairJson{},
				},
			},
		}
		errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*altairPublishBlockRequestJson)
		assert.Equal(t, true, ok)
	})

	t.Run("Bellatrix", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBlindedBeaconBlockBellatrixJson{
				Message: &BlindedBeaconBlockBellatrixJson{
					Body: &BlindedBeaconBlockBodyBellatrixJson{},
				},
			},
		}
		errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*bellatrixPublishBlindedBlockRequestJson)
		assert.Equal(t, true, ok)
	})
	t.Run("Capella", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBlindedBeaconBlockCapellaJson{
				Message: &BlindedBeaconBlockCapellaJson{
					Body: &BlindedBeaconBlockBodyCapellaJson{},
				},
			},
		}
		errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*capellaPublishBlindedBlockRequestJson)
		assert.Equal(t, true, ok)
	})

	t.Run("Deneb", func(t *testing.T) {
		endpoint := &apimiddleware.Endpoint{
			PostRequest: &SignedBlindedBeaconBlockContentsDenebJson{
				SignedBlindedBlock: &SignedBlindedBeaconBlockDenebJson{
					Message: &BlindedBeaconBlockDenebJson{},
				},
				SignedBlindedBlobSidecars: []*SignedBlindedBlobSidecarJson{},
			},
		}
		errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
		require.Equal(t, true, errJson == nil)
		_, ok := endpoint.PostRequest.(*denebPublishBlindedBlockRequestJson)
		assert.Equal(t, true, ok)
	})
	t.Run("unsupported block type", func(t *testing.T) {
		errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
		assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
	})
}
@@ -1,38 +0,0 @@
package apimiddleware

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
)

// BeaconEndpointFactory creates endpoints used for running beacon chain API calls through the API Middleware.
type BeaconEndpointFactory struct {
}

func (f *BeaconEndpointFactory) IsNil() bool {
	return f == nil
}

// Paths is a collection of all valid beacon chain API paths.
func (_ *BeaconEndpointFactory) Paths() []string {
	return []string{
		"/eth/v1/beacon/weak_subjectivity",
		"/eth/v1/events",
	}
}

// Create returns a new endpoint for the provided API path.
func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, error) {
	endpoint := apimiddleware.DefaultEndpoint()
	switch path {
	case "/eth/v1/beacon/weak_subjectivity":
		endpoint.GetResponse = &WeakSubjectivityResponse{}
	case "/eth/v1/events":
		endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleEvents}
	default:
		return nil, errors.New("invalid path")
	}

	endpoint.Path = path
	return &endpoint, nil
}
@@ -1,17 +0,0 @@
package apimiddleware_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestBeaconEndpointFactory_AllPathsRegistered(t *testing.T) {
	f := &apimiddleware.BeaconEndpointFactory{}

	for _, p := range f.Paths() {
		_, err := f.Create(p)
		require.NoError(t, err, "failed to register %s", p)
	}
}
@@ -1,779 +0,0 @@
|
||||
package apimiddleware
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
)
|
||||
|
||||
//----------------
|
||||
// Requests and responses.
|
||||
//----------------
|
||||
|
||||
// WeakSubjectivityResponse is used to marshal/unmarshal the response for the
|
||||
// /eth/v1/beacon/weak_subjectivity endpoint.
|
||||
type WeakSubjectivityResponse struct {
|
||||
Data *struct {
|
||||
Checkpoint *CheckpointJson `json:"ws_checkpoint"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type BlockRootResponseJson struct {
|
||||
Data *BlockRootContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type DepositContractResponseJson struct {
|
||||
Data *DepositContractJson `json:"data"`
|
||||
}
|
||||
|
||||
type AggregateAttestationResponseJson struct {
|
||||
Data *AttestationJson `json:"data"`
|
||||
}
|
||||
|
||||
type BeaconCommitteeSubscribeJson struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
CommitteeIndex string `json:"committee_index"`
|
||||
CommitteesAtSlot string `json:"committees_at_slot"`
|
||||
Slot string `json:"slot"`
|
||||
IsAggregator bool `json:"is_aggregator"`
|
||||
}
|
||||
|
||||
type ProduceSyncCommitteeContributionResponseJson struct {
|
||||
Data *SyncCommitteeContributionJson `json:"data"`
|
||||
}
|
||||
|
||||
type ForkChoiceNodeResponseJson struct {
|
||||
Slot string `json:"slot"`
|
||||
BlockRoot string `json:"block_root" hex:"true"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
JustifiedEpoch string `json:"justified_epoch"`
|
||||
FinalizedEpoch string `json:"finalized_epoch"`
|
||||
Weight string `json:"weight"`
|
||||
Validity string `json:"validity" enum:"true"`
|
||||
ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
|
||||
ExtraData *ForkChoiceNodeExtraDataJson `json:"extra_data"`
|
||||
}
|
||||
|
||||
type ForkChoiceNodeExtraDataJson struct {
|
||||
UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
|
||||
UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
|
||||
Balance string `json:"balance"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
}
|
||||
|
||||
type ForkChoiceResponseJson struct {
|
||||
JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
ForkChoiceNodes []*ForkChoiceNodeResponseJson `json:"fork_choice_nodes"`
|
||||
ExtraData *ForkChoiceResponseExtraDataJson `json:"extra_data"`
|
||||
}
|
||||
|
||||
type ForkChoiceResponseExtraDataJson struct {
|
||||
BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
|
||||
UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
|
||||
UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
|
||||
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
|
||||
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
|
||||
HeadRoot string `json:"head_root" hex:"true"`
|
||||
}
|
||||
|
||||
//----------------
|
||||
// Reusable types.
|
||||
//----------------
|
||||
|
||||
type CheckpointJson struct {
|
||||
Epoch string `json:"epoch"`
|
||||
Root string `json:"root" hex:"true"`
|
||||
}
|
||||
|
||||
type BlockRootContainerJson struct {
|
||||
Root string `json:"root" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockJson struct {
|
||||
Message *BeaconBlockJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type BeaconBlockJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BeaconBlockBodyJson `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
}
|
||||
|
||||
type BeaconBlockContainerV2Json struct {
|
||||
Phase0Block *BeaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebContents *BeaconBlockContentsDenebJson `json:"deneb_contents"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockContainerJson struct {
|
||||
Phase0Block *BeaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebContents *BlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockAltairJson struct {
|
||||
Message *BeaconBlockAltairJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockBellatrixJson struct {
|
||||
Message *BeaconBlockBellatrixJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockCapellaJson struct {
|
||||
Message *BeaconBlockCapellaJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockContentsDenebJson struct {
|
||||
SignedBlock *SignedBeaconBlockDenebJson `json:"signed_block"`
|
||||
SignedBlobSidecars []*SignedBlobSidecarJson `json:"signed_blob_sidecars"`
|
||||
}
|
||||
|
||||
type SignedBlobSidecarJson struct {
|
||||
Message *BlobSidecarJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type BlobSidecarJson struct {
|
||||
BlockRoot string `json:"block_root" hex:"true"`
|
||||
Index string `json:"index"`
|
||||
Slot string `json:"slot"`
|
||||
BlockParentRoot string `json:"block_parent_root" hex:"true"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
Blob string `json:"blob" hex:"true"` // pattern: "^0x[a-fA-F0-9]{262144}$"
|
||||
KzgCommitment string `json:"kzg_commitment" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
|
||||
KzgProof string `json:"kzg_proof,omitempty" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
|
||||
}
|
||||
|
||||
type BlindedBlobSidecarJson struct {
|
||||
BlockRoot string `json:"block_root" hex:"true"`
|
||||
Index string `json:"index"`
|
||||
Slot string `json:"slot"`
|
||||
BlockParentRoot string `json:"block_parent_root" hex:"true"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
BlobRoot string `json:"blob_root" hex:"true"`
|
||||
KzgCommitment string `json:"kzg_commitment" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
|
||||
KzgProof string `json:"kzg_proof,omitempty" hex:"true"` // pattern: "^0x[a-fA-F0-9]{96}$" ssz-size:"48"
|
||||
}
|
||||
|
||||
type SignedBeaconBlockDenebJson struct {
|
||||
Message *BeaconBlockDenebJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockBellatrixJson struct {
|
||||
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockCapellaJson struct {
|
||||
Message *BlindedBeaconBlockCapellaJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockContentsDenebJson struct {
|
||||
SignedBlindedBlock *SignedBlindedBeaconBlockDenebJson `json:"signed_blinded_block"`
|
||||
SignedBlindedBlobSidecars []*SignedBlindedBlobSidecarJson `json:"signed_blinded_blob_sidecars"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockDenebJson struct {
|
||||
Message *BlindedBeaconBlockDenebJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBlobSidecarJson struct {
|
||||
Message *BlindedBlobSidecarJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type BeaconBlockAltairJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BeaconBlockBodyAltairJson `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockBellatrixJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BeaconBlockBodyBellatrixJson `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockCapellaJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BeaconBlockBodyCapellaJson `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockContentsDenebJson struct {
|
||||
Block *BeaconBlockDenebJson `json:"block"`
|
||||
BlobSidecars []*BlobSidecarJson `json:"blob_sidecars"`
|
||||
}
|
||||
|
||||
type BeaconBlockDenebJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BeaconBlockBodyDenebJson `json:"body"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrixJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BlindedBeaconBlockBodyBellatrixJson `json:"body"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockCapellaJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockDenebJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BlindedBeaconBlockBodyDenebJson `json:"body"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockContentsDenebJson struct {
|
||||
BlindedBlock *BlindedBeaconBlockDenebJson `json:"blinded_block"`
|
||||
BlindedBlobSidecars []*BlindedBlobSidecarJson `json:"blinded_blob_sidecars"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyAltairJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyBellatrixJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadJson `json:"execution_payload"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyCapellaJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadCapellaJson `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyDenebJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadDenebJson `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyBellatrixJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderJson `json:"execution_payload_header"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyCapellaJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyDenebJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
Transactions []string `json:"transactions" hex:"true"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadCapellaJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
Transactions []string `json:"transactions" hex:"true"`
|
||||
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadDenebJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlobGasUsed string `json:"blob_gas_used"` // new in deneb
|
||||
ExcessBlobGas string `json:"excess_blob_gas"` // new in deneb
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
Transactions []string `json:"transactions" hex:"true"`
|
||||
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
TransactionsRoot string `json:"transactions_root" hex:"true"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderCapellaJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
TransactionsRoot string `json:"transactions_root" hex:"true"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderDenebJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
	BlobGasUsed string `json:"blob_gas_used"` // new in deneb
	ExcessBlobGas string `json:"excess_blob_gas"` // new in deneb
	BlockHash string `json:"block_hash" hex:"true"`
	TransactionsRoot string `json:"transactions_root" hex:"true"`
	WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}

type SyncAggregateJson struct {
	SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
	SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
}

type SignedBeaconBlockHeaderJson struct {
	Header *BeaconBlockHeaderJson `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

type BeaconBlockHeaderJson struct {
	Slot string `json:"slot"`
	ProposerIndex string `json:"proposer_index"`
	ParentRoot string `json:"parent_root" hex:"true"`
	StateRoot string `json:"state_root" hex:"true"`
	BodyRoot string `json:"body_root" hex:"true"`
}

type Eth1DataJson struct {
	DepositRoot string `json:"deposit_root" hex:"true"`
	DepositCount string `json:"deposit_count"`
	BlockHash string `json:"block_hash" hex:"true"`
}

type ProposerSlashingJson struct {
	Header_1 *SignedBeaconBlockHeaderJson `json:"signed_header_1"`
	Header_2 *SignedBeaconBlockHeaderJson `json:"signed_header_2"`
}

type AttesterSlashingJson struct {
	Attestation_1 *IndexedAttestationJson `json:"attestation_1"`
	Attestation_2 *IndexedAttestationJson `json:"attestation_2"`
}

type IndexedAttestationJson struct {
	AttestingIndices []string `json:"attesting_indices"`
	Data *AttestationDataJson `json:"data"`
	Signature string `json:"signature" hex:"true"`
}

type AttestationJson struct {
	AggregationBits string `json:"aggregation_bits" hex:"true"`
	Data *AttestationDataJson `json:"data"`
	Signature string `json:"signature" hex:"true"`
}

type AttestationDataJson struct {
	Slot string `json:"slot"`
	CommitteeIndex string `json:"index"`
	BeaconBlockRoot string `json:"beacon_block_root" hex:"true"`
	Source *CheckpointJson `json:"source"`
	Target *CheckpointJson `json:"target"`
}

type SignedBLSToExecutionChangeJson struct {
	Message *BLSToExecutionChangeJson `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

type BLSToExecutionChangeJson struct {
	ValidatorIndex string `json:"validator_index"`
	FromBLSPubkey string `json:"from_bls_pubkey" hex:"true"`
	ToExecutionAddress string `json:"to_execution_address" hex:"true"`
}

type DepositJson struct {
	Proof []string `json:"proof" hex:"true"`
	Data *Deposit_DataJson `json:"data"`
}

type Deposit_DataJson struct {
	PublicKey string `json:"pubkey" hex:"true"`
	WithdrawalCredentials string `json:"withdrawal_credentials" hex:"true"`
	Amount string `json:"amount"`
	Signature string `json:"signature" hex:"true"`
}

type SignedVoluntaryExitJson struct {
	Exit *VoluntaryExitJson `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

type VoluntaryExitJson struct {
	Epoch string `json:"epoch"`
	ValidatorIndex string `json:"validator_index"`
}

type WithdrawalJson struct {
	WithdrawalIndex string `json:"index"`
	ValidatorIndex string `json:"validator_index"`
	ExecutionAddress string `json:"address" hex:"true"`
	Amount string `json:"amount"`
}

type ForkJson struct {
	PreviousVersion string `json:"previous_version" hex:"true"`
	CurrentVersion string `json:"current_version" hex:"true"`
	Epoch string `json:"epoch"`
}

type ValidatorJson struct {
	PublicKey string `json:"pubkey" hex:"true"`
	WithdrawalCredentials string `json:"withdrawal_credentials" hex:"true"`
	EffectiveBalance string `json:"effective_balance"`
	Slashed bool `json:"slashed"`
	ActivationEligibilityEpoch string `json:"activation_eligibility_epoch"`
	ActivationEpoch string `json:"activation_epoch"`
	ExitEpoch string `json:"exit_epoch"`
	WithdrawableEpoch string `json:"withdrawable_epoch"`
}

type SyncCommitteeJson struct {
	Pubkeys []string `json:"pubkeys" hex:"true"`
	AggregatePubkey string `json:"aggregate_pubkey" hex:"true"`
}

type PendingAttestationJson struct {
	AggregationBits string `json:"aggregation_bits" hex:"true"`
	Data *AttestationDataJson `json:"data"`
	InclusionDelay string `json:"inclusion_delay"`
	ProposerIndex string `json:"proposer_index"`
}

type DepositContractJson struct {
	ChainId string `json:"chain_id"`
	Address string `json:"address"`
}

type SignedAggregateAttestationAndProofJson struct {
	Message *AggregateAttestationAndProofJson `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

type AggregateAttestationAndProofJson struct {
	AggregatorIndex string `json:"aggregator_index"`
	Aggregate *AttestationJson `json:"aggregate"`
	SelectionProof string `json:"selection_proof" hex:"true"`
}

type SignedContributionAndProofJson struct {
	Message *ContributionAndProofJson `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

type ContributionAndProofJson struct {
	AggregatorIndex string `json:"aggregator_index"`
	Contribution *SyncCommitteeContributionJson `json:"contribution"`
	SelectionProof string `json:"selection_proof" hex:"true"`
}

type SyncCommitteeContributionJson struct {
	Slot string `json:"slot"`
	BeaconBlockRoot string `json:"beacon_block_root" hex:"true"`
	SubcommitteeIndex string `json:"subcommittee_index"`
	AggregationBits string `json:"aggregation_bits" hex:"true"`
	Signature string `json:"signature" hex:"true"`
}

type HistoricalSummaryJson struct {
	BlockSummaryRoot string `json:"block_summary_root" hex:"true"`
	StateSummaryRoot string `json:"state_summary_root" hex:"true"`
}
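
// Illustrative sketch (not part of the Prysm source): every numeric field in the structs
// above is a string, so a response body decodes with plain encoding/json and callers parse
// numbers afterwards. The payload below is made up, and it assumes CheckpointJson
// (defined earlier in this file) uses the `epoch` and `root` tags.
func decodeAttestationDataExample() (*AttestationDataJson, error) {
	payload := []byte(`{
		"slot": "12345",
		"index": "3",
		"beacon_block_root": "0xdeadbeef",
		"source": {"epoch": "385", "root": "0x01"},
		"target": {"epoch": "386", "root": "0x02"}
	}`)
	data := &AttestationDataJson{}
	if err := json.Unmarshal(payload, data); err != nil {
		return nil, err
	}
	return data, nil
}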

//----------------
// SSZ
// ---------------

type SszRequestJson struct {
	Data string `json:"data"`
}

// SszResponse is a common abstraction over all SSZ responses.
type SszResponse interface {
	SSZVersion() string
	SSZOptimistic() bool
	SSZData() string
	SSZFinalized() bool
}
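
// Illustrative sketch (not part of the Prysm source): a minimal, hypothetical type that
// satisfies the SszResponse interface above, shown only to clarify how the abstraction
// is meant to be used. The field names here are assumptions for the example.
type exampleSszResponse struct {
	Version             string `json:"version"`
	ExecutionOptimistic bool   `json:"execution_optimistic"`
	Finalized           bool   `json:"finalized"`
	Data                string `json:"data"`
}

var _ SszResponse = (*exampleSszResponse)(nil)

func (r *exampleSszResponse) SSZVersion() string  { return r.Version }
func (r *exampleSszResponse) SSZOptimistic() bool { return r.ExecutionOptimistic }
func (r *exampleSszResponse) SSZData() string     { return r.Data }
func (r *exampleSszResponse) SSZFinalized() bool  { return r.Finalized }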

// ---------------
// Events.
// ---------------

type EventHeadJson struct {
	Slot string `json:"slot"`
	Block string `json:"block" hex:"true"`
	State string `json:"state" hex:"true"`
	EpochTransition bool `json:"epoch_transition"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	PreviousDutyDependentRoot string `json:"previous_duty_dependent_root" hex:"true"`
	CurrentDutyDependentRoot string `json:"current_duty_dependent_root" hex:"true"`
}

type ReceivedBlockDataJson struct {
	Slot string `json:"slot"`
	Block string `json:"block" hex:"true"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
}

type AggregatedAttReceivedDataJson struct {
	Aggregate *AttestationJson `json:"aggregate"`
}

type UnaggregatedAttReceivedDataJson struct {
	AggregationBits string `json:"aggregation_bits" hex:"true"`
	Data *AttestationDataJson `json:"data"`
	Signature string `json:"signature" hex:"true"`
}

type EventFinalizedCheckpointJson struct {
	Block string `json:"block" hex:"true"`
	State string `json:"state" hex:"true"`
	Epoch string `json:"epoch"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
}

type EventChainReorgJson struct {
	Slot string `json:"slot"`
	Depth string `json:"depth"`
	OldHeadBlock string `json:"old_head_block" hex:"true"`
	NewHeadBlock string `json:"new_head_block" hex:"true"`
	OldHeadState string `json:"old_head_state" hex:"true"`
	NewHeadState string `json:"new_head_state" hex:"true"`
	Epoch string `json:"epoch"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
}

type EventPayloadAttributeStreamV1Json struct {
	Version string `json:"version"`
	Data *EventPayloadAttributeV1Json `json:"data"`
}

type EventPayloadAttributeStreamV2Json struct {
	Version string `json:"version"`
	Data *EventPayloadAttributeV2Json `json:"data"`
}

type EventPayloadAttributeV1Json struct {
	ProposerIndex string `json:"proposer_index"`
	ProposalSlot string `json:"proposal_slot"`
	ParentBlockNumber string `json:"parent_block_number"`
	ParentBlockRoot string `json:"parent_block_root" hex:"true"`
	ParentBlockHash string `json:"parent_block_hash" hex:"true"`
	PayloadAttributes *PayloadAttributesV1Json `json:"payload_attributes"`
}

type EventPayloadAttributeV2Json struct {
	ProposerIndex string `json:"proposer_index"`
	ProposalSlot string `json:"proposal_slot"`
	ParentBlockNumber string `json:"parent_block_number"`
	ParentBlockRoot string `json:"parent_block_root" hex:"true"`
	ParentBlockHash string `json:"parent_block_hash" hex:"true"`
	PayloadAttributes *PayloadAttributesV2Json `json:"payload_attributes"`
}

type PayloadAttributesV1Json struct {
	Timestamp string `json:"timestamp"`
	Random string `json:"prev_randao" hex:"true"`
	SuggestedFeeRecipient string `json:"suggested_fee_recipient" hex:"true"`
}

type PayloadAttributesV2Json struct {
	Timestamp string `json:"timestamp"`
	Random string `json:"prev_randao" hex:"true"`
	SuggestedFeeRecipient string `json:"suggested_fee_recipient" hex:"true"`
	Withdrawals []*WithdrawalJson `json:"withdrawals"`
}

type EventBlobSidecarJson struct {
	BlockRoot string `json:"block_root" hex:"true"`
	Index string `json:"index"`
	Slot string `json:"slot"`
	KzgCommitment string `json:"kzg_commitment" hex:"true"`
	VersionedHash string `json:"versioned_hash" hex:"true"`
}
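
// Illustrative sketch (not part of the Prysm source): encoding one of the event structs
// above as a Server-Sent Events frame. The event name "head" matches the beacon API
// topic; the helper name and the encoding/json, fmt, io imports are assumptions here.
func writeHeadEventExample(w io.Writer, ev *EventHeadJson) error {
	payload, err := json.Marshal(ev)
	if err != nil {
		return err
	}
	_, err = fmt.Fprintf(w, "event: head\ndata: %s\n\n", payload)
	return err
}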

// ---------------
// Error handling.
// ---------------

// IndexedVerificationFailureErrorJson is a JSON representation of the error returned when verifying an indexed object.
type IndexedVerificationFailureErrorJson struct {
	apimiddleware.DefaultErrorJson
	Failures []*SingleIndexedVerificationFailureJson `json:"failures"`
}

// SingleIndexedVerificationFailureJson is a JSON representation of an issue encountered when verifying a single indexed object, e.g. an item in an array.
type SingleIndexedVerificationFailureJson struct {
	Index int `json:"index"`
	Message string `json:"message"`
}

type EventErrorJson struct {
	StatusCode int `json:"status_code"`
	Message string `json:"message"`
}
@@ -1,38 +0,0 @@
package apimiddleware

import (
	"encoding/base64"
	"strconv"
	"strings"

	"github.com/pkg/errors"
)

// EpochParticipation represents participation of validators in their duties.
type EpochParticipation []string

func (p *EpochParticipation) UnmarshalJSON(b []byte) error {
	if string(b) == "null" {
		return nil
	}
	if len(b) < 2 {
		return errors.New("epoch participation length must be at least 2")
	}
	if b[0] != '"' || b[len(b)-1] != '"' {
		return errors.Errorf("provided epoch participation json string is malformed: %s", string(b))
	}

	// Remove leading and trailing quotation marks.
	jsonString := string(b)
	jsonString = strings.Trim(jsonString, "\"")
	decoded, err := base64.StdEncoding.DecodeString(jsonString)
	if err != nil {
		return errors.Wrapf(err, "could not decode epoch participation base64 value")
	}

	*p = make([]string, len(decoded))
	for i, participation := range decoded {
		(*p)[i] = strconv.FormatUint(uint64(participation), 10)
	}
	return nil
}
@@ -1,43 +0,0 @@
package apimiddleware

import (
	"encoding/base64"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestUnmarshalEpochParticipation(t *testing.T) {
	t.Run("ok", func(t *testing.T) {
		b := []byte{3, 3, 0}
		b64 := []byte("\"" + base64.StdEncoding.EncodeToString(b) + "\"")
		ep := EpochParticipation{}
		require.NoError(t, ep.UnmarshalJSON(b64))
		require.Equal(t, 3, len(ep))
		assert.Equal(t, "3", ep[0])
		assert.Equal(t, "3", ep[1])
		assert.Equal(t, "0", ep[2])
	})
	t.Run("incorrect value", func(t *testing.T) {
		ep := EpochParticipation{}
		err := ep.UnmarshalJSON([]byte(":illegal:"))
		require.NotNil(t, err)
		assert.ErrorContains(t, "provided epoch participation json string is malformed", err)
	})
	t.Run("length too small", func(t *testing.T) {
		ep := EpochParticipation{}
		err := ep.UnmarshalJSON([]byte("x"))
		require.NotNil(t, err)
		assert.ErrorContains(t, "epoch participation length must be at least 2", err)
	})
	t.Run("null value", func(t *testing.T) {
		ep := EpochParticipation{}
		require.NoError(t, ep.UnmarshalJSON([]byte("null")))
		assert.DeepEqual(t, EpochParticipation([]string{}), ep)
	})
	t.Run("invalid value", func(t *testing.T) {
		ep := EpochParticipation{}
		require.ErrorContains(t, "provided epoch participation json string is malformed", ep.UnmarshalJSON([]byte("XdHJ1ZQ==X")))
	})
}
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "blocks.go",
        "handlers.go",
        "handlers_pool.go",
        "handlers_state.go",
@@ -51,32 +50,25 @@ go_library(
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/http:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_protobuf//ptypes/empty",
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "blinded_blocks_test.go",
        "blocks_test.go",
        "handlers_pool_test.go",
        "handlers_state_test.go",
        "handlers_test.go",
        "handlers_validators_test.go",
        "init_test.go",
        "server_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -95,7 +87,6 @@ go_test(
        "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits/mock:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/rpc/apimiddleware:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/rpc/eth/shared/testing:go_default_library",
@@ -115,8 +106,6 @@ go_test(
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz:go_default_library",
        "//network/http:go_default_library",
        "//proto/eth/service:go_default_library",
        "//proto/migration:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
@@ -1 +0,0 @@
package beacon
@@ -1,62 +0,0 @@
package beacon

import (
	"context"
	"fmt"

	"github.com/golang/protobuf/ptypes/empty"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
	rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var (
	errNilBlock = errors.New("nil block")
)

// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period, and then also
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
// DEPRECATED: GetWeakSubjectivity endpoint will no longer be supported
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
	if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
		// This is already a grpc error, so we can't wrap it any further
		return nil, err
	}

	hs, err := bs.HeadFetcher.HeadStateReadOnly(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, "could not get head state")
	}
	wsEpoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, hs, params.BeaconConfig())
	if err != nil {
		return nil, status.Errorf(codes.Internal, "could not get weak subjectivity epoch: %v", err)
	}
	wsSlot, err := slots.EpochStart(wsEpoch)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "could not get weak subjectivity slot: %v", err)
	}
	cbr, err := bs.CanonicalHistory.BlockRootForSlot(ctx, wsSlot)
	if err != nil {
		return nil, status.Errorf(codes.Internal, fmt.Sprintf("could not find highest block below slot %d", wsSlot))
	}
	cb, err := bs.BeaconDB.Block(ctx, cbr)
	if err != nil {
		return nil, status.Errorf(codes.Internal, fmt.Sprintf("block with root %#x from slot index %d not found in db", cbr, wsSlot))
	}
	stateRoot := cb.Block().StateRoot()
	log.Printf("weak subjectivity checkpoint reported as epoch=%d, block root=%#x, state root=%#x", wsEpoch, cbr, stateRoot)
	return &ethpbv1.WeakSubjectivityResponse{
		Data: &ethpbv1.WeakSubjectivityData{
			WsCheckpoint: &ethpbv1.Checkpoint{
				Epoch: wsEpoch,
				Root:  cbr[:],
			},
			StateRoot: stateRoot[:],
		},
	}, nil
}
@@ -1 +0,0 @@
package beacon
@@ -39,6 +39,10 @@ const (
	broadcastValidationConsensusAndEquivocation = "consensus_and_equivocation"
)

var (
	errNilBlock = errors.New("nil block")
)

type handled bool

// GetBlock retrieves block details for given block ID.
@@ -1211,7 +1215,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
		http2.HandleError(w, "Could not read request body", http.StatusInternalServerError)
		return
	}
	denebBlockContents := &eth.SignedBeaconBlockAndBlobsDeneb{}
	denebBlockContents := &eth.SignedBeaconBlockDeneb{}
	if err := denebBlockContents.UnmarshalSSZ(body); err == nil {
		genericBlock := &eth.GenericSignedBeaconBlock{
			Block: &eth.GenericSignedBeaconBlock_Deneb{
@@ -1545,9 +1549,7 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
		return
	}
	response := &BlockRootResponse{
		Data: &struct {
			Root string `json:"root"`
		}{
		Data: &BlockRoot{
			Root: hexutil.Encode(root),
		},
		ExecutionOptimistic: isOptimistic,
@@ -24,7 +24,6 @@ import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
	p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
@@ -306,7 +305,7 @@ func TestSubmitAttestations(t *testing.T) {

	s.SubmitAttestations(writer, request)
	assert.Equal(t, http.StatusBadRequest, writer.Code)
	e := &apimiddleware.IndexedVerificationFailureErrorJson{}
	e := &shared.IndexedVerificationFailureError{}
	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
	assert.Equal(t, http.StatusBadRequest, e.Code)
	require.Equal(t, 1, len(e.Failures))
@@ -35,7 +35,6 @@ import (
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	http2 "github.com/prysmaticlabs/prysm/v4/network/http"
	"github.com/prysmaticlabs/prysm/v4/proto/migration"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/runtime/version"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -1083,7 +1082,7 @@ func TestPublishBlock(t *testing.T) {
		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Deneb)
			converted, err := shared.BeaconBlockDenebFromConsensus(block.Deneb.Block.Block)
			converted, err := shared.BeaconBlockDenebFromConsensus(block.Deneb.Block)
			require.NoError(t, err)
			var signedblock *shared.SignedBeaconBlockContentsDeneb
			err = json.Unmarshal([]byte(rpctesting.DenebBlockContents), &signedblock)
@@ -1200,31 +1199,33 @@ func TestPublishBlockSSZ(t *testing.T) {
		assert.Equal(t, http.StatusOK, writer.Code)
	})
	t.Run("Deneb", func(t *testing.T) {
		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Deneb)
			return ok
		}))
		server := &Server{
			V1Alpha1ValidatorServer: v1alpha1Server,
			SyncChecker: &mockSync.Sync{IsSyncing: false},
		}
		// TODO: Fix this as part of beacon API changes

		var dblock shared.SignedBeaconBlockContentsDeneb
		err := json.Unmarshal([]byte(rpctesting.DenebBlockContents), &dblock)
		require.NoError(t, err)
		genericBlock, err := dblock.ToGeneric()
		require.NoError(t, err)
		v2block, err := migration.V1Alpha1SignedBeaconBlockDenebAndBlobsToV2(genericBlock.GetDeneb())
		require.NoError(t, err)
		sszvalue, err := v2block.MarshalSSZ()
		require.NoError(t, err)
		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
		request.Header.Set("Accept", "application/octet-stream")
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}
		server.PublishBlock(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
		//v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		//v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
		// _, ok := req.Block.(*eth.GenericSignedBeaconBlock_Deneb)
		// return ok
		//}))
		//server := &Server{
		// V1Alpha1ValidatorServer: v1alpha1Server,
		// SyncChecker: &mockSync.Sync{IsSyncing: false},
		//}
		//
		//var dblock shared.SignedBeaconBlockContentsDeneb
		//err := json.Unmarshal([]byte(rpctesting.DenebBlockContents), &dblock)
		//require.NoError(t, err)
		//genericBlock, err := dblock.ToGeneric()
		//require.NoError(t, err)
		//v2block, err := migration.V1Alpha1SignedBeaconBlockDenebAndBlobsToV2(genericBlock.GetDeneb())
		//require.NoError(t, err)
		//sszvalue, err := v2block.MarshalSSZ()
		//require.NoError(t, err)
		//request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
		//request.Header.Set("Accept", "application/octet-stream")
		//writer := httptest.NewRecorder()
		//writer.Body = &bytes.Buffer{}
		//server.PublishBlock(writer, request)
		//assert.Equal(t, http.StatusOK, writer.Code)
	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
@@ -1455,33 +1456,34 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
		server.PublishBlindedBlock(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
	t.Run("Deneb", func(t *testing.T) {
		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedDeneb)
			return ok
		}))
		server := &Server{
			V1Alpha1ValidatorServer: v1alpha1Server,
			SyncChecker: &mockSync.Sync{IsSyncing: false},
		}

		var cblock shared.SignedBlindedBeaconBlockContentsDeneb
		err := json.Unmarshal([]byte(rpctesting.BlindedDenebBlockContents), &cblock)
		require.NoError(t, err)
		genericBlock, err := cblock.ToGeneric()
		require.NoError(t, err)
		v1block, err := migration.V1Alpha1SignedBlindedBlockAndBlobsDenebToV2Blinded(genericBlock.GetBlindedDeneb())
		require.NoError(t, err)
		sszvalue, err := v1block.MarshalSSZ()
		require.NoError(t, err)
		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
		request.Header.Set("Accept", "application/octet-stream")
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}
		server.PublishBlindedBlock(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
	// TODO: update this test when updating beacon apis
	//t.Run("Deneb", func(t *testing.T) {
	// v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
	// v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
	//  _, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedDeneb)
	//  return ok
	// }))
	// server := &Server{
	//  V1Alpha1ValidatorServer: v1alpha1Server,
	//  SyncChecker: &mockSync.Sync{IsSyncing: false},
	// }
	//
	// var cblock shared.SignedBlindedBeaconBlockContentsDeneb
	// err := json.Unmarshal([]byte(rpctesting.BlindedDenebBlockContents), &cblock)
	// require.NoError(t, err)
	// genericBlock, err := cblock.ToGeneric()
	// require.NoError(t, err)
	// v1block, err := migration.V1Alpha1SignedBlindedBlockAndBlobsDenebToV2Blinded(genericBlock.GetBlindedDeneb())
	// require.NoError(t, err)
	// sszvalue, err := v1block.MarshalSSZ()
	// require.NoError(t, err)
	// request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
	// request.Header.Set("Accept", "application/octet-stream")
	// writer := httptest.NewRecorder()
	// writer.Body = &bytes.Buffer{}
	// server.PublishBlindedBlock(writer, request)
	// assert.Equal(t, http.StatusOK, writer.Code)
	//})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1595,7 +1597,7 @@ func TestPublishBlockV2(t *testing.T) {
		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Deneb)
			converted, err := shared.BeaconBlockDenebFromConsensus(block.Deneb.Block.Block)
			converted, err := shared.BeaconBlockDenebFromConsensus(block.Deneb.Block)
			require.NoError(t, err)
			var signedblock *shared.SignedBeaconBlockContentsDeneb
			err = json.Unmarshal([]byte(rpctesting.DenebBlockContents), &signedblock)
@@ -1712,31 +1714,33 @@ func TestPublishBlockV2SSZ(t *testing.T) {
		assert.Equal(t, http.StatusOK, writer.Code)
	})
	t.Run("Deneb", func(t *testing.T) {
		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Deneb)
			return ok
		}))
		server := &Server{
			V1Alpha1ValidatorServer: v1alpha1Server,
			SyncChecker: &mockSync.Sync{IsSyncing: false},
		}
		// TODO: Fix this as part of beacon API changes

		var dblock shared.SignedBeaconBlockContentsDeneb
		err := json.Unmarshal([]byte(rpctesting.DenebBlockContents), &dblock)
		require.NoError(t, err)
		genericBlock, err := dblock.ToGeneric()
		require.NoError(t, err)
		v2block, err := migration.V1Alpha1SignedBeaconBlockDenebAndBlobsToV2(genericBlock.GetDeneb())
		require.NoError(t, err)
		sszvalue, err := v2block.MarshalSSZ()
		require.NoError(t, err)
		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
		request.Header.Set("Accept", "application/octet-stream")
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}
		server.PublishBlockV2(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
		//v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		//v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
		// _, ok := req.Block.(*eth.GenericSignedBeaconBlock_Deneb)
		// return ok
		//}))
		//server := &Server{
		// V1Alpha1ValidatorServer: v1alpha1Server,
		// SyncChecker: &mockSync.Sync{IsSyncing: false},
		//}
		//
		//var dblock shared.SignedBeaconBlockContentsDeneb
		//err := json.Unmarshal([]byte(rpctesting.DenebBlockContents), &dblock)
		//require.NoError(t, err)
		//genericBlock, err := dblock.ToGeneric()
		//require.NoError(t, err)
		//v2block, err := migration.V1Alpha1SignedBeaconBlockDenebAndBlobsToV2(genericBlock.GetDeneb())
		//require.NoError(t, err)
		//sszvalue, err := v2block.MarshalSSZ()
		//require.NoError(t, err)
		//request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
		//request.Header.Set("Accept", "application/octet-stream")
		//writer := httptest.NewRecorder()
		//writer.Body = &bytes.Buffer{}
		//server.PublishBlockV2(writer, request)
		//assert.Equal(t, http.StatusOK, writer.Code)
	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
@@ -1967,33 +1971,34 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
		server.PublishBlindedBlockV2(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
	t.Run("Deneb", func(t *testing.T) {
		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedDeneb)
			return ok
		}))
		server := &Server{
			V1Alpha1ValidatorServer: v1alpha1Server,
			SyncChecker: &mockSync.Sync{IsSyncing: false},
		}

		var cblock shared.SignedBlindedBeaconBlockContentsDeneb
		err := json.Unmarshal([]byte(rpctesting.BlindedDenebBlockContents), &cblock)
		require.NoError(t, err)
		genericBlock, err := cblock.ToGeneric()
		require.NoError(t, err)
		v1block, err := migration.V1Alpha1SignedBlindedBlockAndBlobsDenebToV2Blinded(genericBlock.GetBlindedDeneb())
		require.NoError(t, err)
		sszvalue, err := v1block.MarshalSSZ()
		require.NoError(t, err)
		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
		request.Header.Set("Accept", "application/octet-stream")
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}
		server.PublishBlindedBlockV2(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
	// TODO: update this test when updating beacon apis
	//t.Run("Deneb", func(t *testing.T) {
	// v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
	// v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
	//  _, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedDeneb)
	//  return ok
	// }))
	// server := &Server{
	//  V1Alpha1ValidatorServer: v1alpha1Server,
	//  SyncChecker: &mockSync.Sync{IsSyncing: false},
	// }
	//
	// var cblock shared.SignedBlindedBeaconBlockContentsDeneb
	// err := json.Unmarshal([]byte(rpctesting.BlindedDenebBlockContents), &cblock)
	// require.NoError(t, err)
	// genericBlock, err := cblock.ToGeneric()
	// require.NoError(t, err)
	// v1block, err := migration.V1Alpha1SignedBlindedBlockAndBlobsDenebToV2Blinded(genericBlock.GetBlindedDeneb())
	// require.NoError(t, err)
	// sszvalue, err := v1block.MarshalSSZ()
	// require.NoError(t, err)
	// request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
	// request.Header.Set("Accept", "application/octet-stream")
	// writer := httptest.NewRecorder()
	// writer.Body = &bytes.Buffer{}
	// server.PublishBlindedBlockV2(writer, request)
	// assert.Equal(t, http.StatusOK, writer.Code)
	//})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1,5 +0,0 @@
package beacon

import ethpbservice "github.com/prysmaticlabs/prysm/v4/proto/eth/service"

var _ ethpbservice.BeaconChainServer = (*Server)(nil)
@@ -7,11 +7,13 @@ import (
)

type BlockRootResponse struct {
	Data *struct {
		Root string `json:"root"`
	} `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
	Data *BlockRoot `json:"data"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized bool `json:"finalized"`
}

type BlockRoot struct {
	Root string `json:"root"`
}
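
// Illustrative sketch (not part of the Prysm source): with the refactor above, GetBlockRoot
// can build its response from the named BlockRoot type instead of an anonymous struct.
// The helper name and the direct use of encoding/json are assumptions for the example;
// hexutil is the go-ethereum common/hexutil package already used in the diff.
func blockRootResponseExample(root []byte, isOptimistic, isFinalized bool) ([]byte, error) {
	resp := &BlockRootResponse{
		Data:                &BlockRoot{Root: hexutil.Encode(root)},
		ExecutionOptimistic: isOptimistic,
		Finalized:           isFinalized,
	}
	return json.Marshal(resp)
}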

type GetCommitteesResponse struct {
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "events.go",
        "server.go",
        "structs.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events",
    visibility = ["//beacon-chain:__subpackages__"],
@@ -16,20 +17,14 @@ go_library(
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/service:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//network/http:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/migration:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_grpc_ecosystem_grpc_gateway_v2//proto/gateway:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -38,31 +33,18 @@ go_test(
    srcs = ["events_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//config/fieldparams:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/migration:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/mock:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_mock//gomock:go_default_library",
        "@com_github_grpc_ecosystem_grpc_gateway_v2//proto/gateway:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff.