Compare commits


5 Commits

Author            SHA1        Message                                                          Date
terence tsao      e9922bb1da  Fix consensus_spec sha256                                        2024-01-18 15:11:37 -08:00
Aditya Asgaonkar  29ad26d4db  run bazel gazelle                                                2024-01-18 14:59:02 -08:00
Aditya Asgaonkar  d14cc5633f  update WORKSPACE for spec v1.4.0-beta.6                          2024-01-18 13:40:56 -08:00
Aditya Asgaonkar  144f24f63f  update tests                                                     2024-01-18 13:40:56 -08:00
Aditya Asgaonkar  f5bdb4f544  implement confirmation rule prerequisite - f.c. filter changes  2024-01-18 13:40:56 -08:00
435 changed files with 6145 additions and 10949 deletions

View File

@@ -80,6 +80,7 @@ linters:
- thelper
- unparam
- varnamelen
- wastedassign
- wrapcheck
- wsl

View File

@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release
## Adding / updating dependencies
1. Add your dependency as you would with go modules. I.e. `go get ...`
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the bazel managed dependencies.
1. Run `gazelle update-repos -from_file=go.mod` to update the bazel managed dependencies.
Example:

View File

@@ -106,13 +106,6 @@ load("@rules_distroless//distroless:dependencies.bzl", "rules_distroless_depende
rules_distroless_dependencies()
http_archive(
name = "distroless",
integrity = "sha256-Cf00kUp1NyXA3LzbdyYy4Kda27wbkB8+A9MliTxq4jE=",
strip_prefix = "distroless-9dc924b9fe812eec2fa0061824dcad39eb09d0d6",
url = "https://github.com/GoogleContainerTools/distroless/archive/9dc924b9fe812eec2fa0061824dcad39eb09d0d6.tar.gz", # 2024-01-24
)
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
aspect_bazel_lib_dependencies()
@@ -151,10 +144,6 @@ http_archive(
],
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
# Override default import in rules_go with special patch until
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
@@ -234,9 +223,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
consensus_spec_version = "v1.4.0-beta.6"
bls_test_version = "v0.1.1"
@@ -252,8 +239,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
sha256 = "7dc467d7be97525c88a1d3683665c1354cc86297fd62009e7cf5000905b25652",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -268,8 +255,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
sha256 = "e163011254b6ce100205fb779ba660faedc9bc9f7bb4408c25746a7aa5e8d8bc",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -284,8 +271,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
sha256 = "b73c81b6386053a2141f6f43b457489668621c7013f740ed93edf9ac0e34f091",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -299,7 +286,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
sha256 = "47726c527512d03ef3e706a8e7f8d5db6a5f2153351db0470dab780f6a87c4dd",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -330,9 +317,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)
http_archive(
@@ -362,9 +349,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5f4be6fd088683ea9db45c863b9c5a1884422449e5b59fd2d561d3ba0f73ffd9",
strip_prefix = "holesky-9d9aabf2d4de51334ee5fed6c79a4d55097d1a43",
url = "https://github.com/eth-clients/holesky/archive/9d9aabf2d4de51334ee5fed6c79a4d55097d1a43.tar.gz", # 2024-01-22
sha256 = "9f66d8d5644982d3d0d2e3d2b9ebe77a5f96638a5d7fcd715599c32818195cb3",
strip_prefix = "holesky-ea39b9006210848e13f28d92e12a30548cecd41d",
url = "https://github.com/eth-clients/holesky/archive/ea39b9006210848e13f28d92e12a30548cecd41d.tar.gz", # 2023-09-21
)
http_archive(

View File

@@ -12,8 +12,11 @@ go_library(
deps = [
"//api/client:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/config:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/prysm/beacon:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -17,7 +17,10 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
apibeacon "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/beacon"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
@@ -147,8 +150,8 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
fr := &structs.Fork{}
dataWrapper := &struct{ Data *structs.Fork }{Data: fr}
fr := &shared.Fork{}
dataWrapper := &struct{ Data *shared.Fork }{Data: fr}
err = json.Unmarshal(body, dataWrapper)
if err != nil {
return nil, errors.Wrap(err, "error decoding json response in GetFork")
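The GetFork change above decodes a beacon API response whose payload is wrapped in a {"data": ...} envelope before handing back the typed struct. A minimal standalone sketch of that decoding pattern, using a hypothetical Fork struct with plain string fields rather than Prysm's actual type:

package main

import (
	"encoding/json"
	"fmt"
)

// Fork is a hypothetical stand-in for the fork struct referenced above.
type Fork struct {
	PreviousVersion string `json:"previous_version"`
	CurrentVersion  string `json:"current_version"`
	Epoch           string `json:"epoch"`
}

func main() {
	// Beacon API endpoints wrap their payload in a {"data": ...} envelope.
	body := []byte(`{"data":{"previous_version":"0x00000000","current_version":"0x01000000","epoch":"5"}}`)

	// Decode into an anonymous wrapper so the inner struct can be reused directly.
	fr := &Fork{}
	dataWrapper := &struct{ Data *Fork }{Data: fr}
	if err := json.Unmarshal(body, dataWrapper); err != nil {
		panic(err)
	}
	fmt.Println(fr.CurrentVersion, fr.Epoch) // 0x01000000 5
}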
@@ -176,12 +179,12 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
}
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
func (c *Client) GetConfigSpec(ctx context.Context) (*config.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
fsr := &structs.GetSpecResponse{}
fsr := &config.GetSpecResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
@@ -256,7 +259,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
if err != nil {
return nil, err
}
v := &structs.GetWeakSubjectivityResponse{}
v := &apibeacon.GetWeakSubjectivityResponse{}
err = json.Unmarshal(body, v)
if err != nil {
return nil, err
@@ -282,7 +285,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*structs.SignedBLSToExecutionChange) error {
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
@@ -321,12 +324,12 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToExecutionChangesPoolResponse, error) {
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
body, err := c.Get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &structs.BLSToExecutionChangesPoolResponse{}
poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err
@@ -335,7 +338,7 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
type forkScheduleResponse struct {
Data []structs.Fork
Data []shared.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {

View File

@@ -11,7 +11,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -40,7 +40,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -165,7 +165,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlobKzgCommitments --
@@ -249,7 +249,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlobKzgCommitments --
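The two Header() changes above wrap the builder bid's value with PayloadValueToGwei instead of PayloadValueToWei; the surrounding comment notes the value arrives from the execution layer in the opposite byte order. A rough standalone sketch, with hypothetical helpers rather than Prysm's actual functions, of the two operations such a conversion involves, reversing byte order and truncating wei to gwei:

package main

import (
	"fmt"
	"math/big"
)

// reverseBytes returns a copy of b with its byte order flipped, e.g. to
// reinterpret a little-endian 256-bit value as big-endian input for big.Int.
func reverseBytes(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

// weiToGwei converts a wei amount to gwei, truncating any remainder.
func weiToGwei(wei *big.Int) uint64 {
	return new(big.Int).Div(wei, big.NewInt(1e9)).Uint64()
}

func main() {
	// A hypothetical 32-byte little-endian payload value of 1 gwei (1e9 wei).
	le := make([]byte, 32)
	le[0], le[1], le[2], le[3] = 0x00, 0xca, 0x9a, 0x3b
	wei := new(big.Int).SetBytes(reverseBytes(le)) // big.Int expects big-endian bytes
	fmt.Println(weiToGwei(wei))                    // 1
}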

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -14,7 +13,7 @@ import (
"text/template"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -267,9 +266,9 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
tracing.AnnotateError(span, err)
return err
}
vs := make([]*structs.SignedValidatorRegistration, len(svr))
vs := make([]*shared.SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
vs[i] = shared.SignedValidatorRegistrationFromConsensus(svr[i])
}
body, err := json.Marshal(vs)
if err != nil {
@@ -294,7 +293,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(&ethpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
b, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(&ethpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
}
@@ -331,7 +330,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(&ethpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
b, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(&ethpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
}
@@ -358,7 +357,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
@@ -368,7 +367,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
b, err := shared.SignedBlindedBeaconBlockDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
}
@@ -395,7 +394,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}

View File

@@ -13,7 +13,7 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -376,7 +376,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
var req structs.SignedBlindedBeaconBlockDeneb
var req shared.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
block, err := req.ToConsensus()

View File

@@ -13,7 +13,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/math"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
@@ -38,7 +38,7 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
},
Signature: make([]byte, 96),
}
a := structs.SignedValidatorRegistrationFromConsensus(svr)
a := shared.SignedValidatorRegistrationFromConsensus(svr)
je, err := json.Marshal(a)
require.NoError(t, err)
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
@@ -55,7 +55,7 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
t.Run("roundtrip", func(t *testing.T) {
b := &structs.SignedValidatorRegistration{}
b := &shared.SignedValidatorRegistration{}
if err := json.Unmarshal(je, b); err != nil {
require.NoError(t, err)
}
@@ -1718,7 +1718,7 @@ func TestUint256UnmarshalTooBig(t *testing.T) {
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
expected, err := os.ReadFile("testdata/blinded-block.json")
require.NoError(t, err)
b, err := structs.BlindedBeaconBlockBellatrixFromConsensus(&eth.BlindedBeaconBlockBellatrix{
b, err := shared.BlindedBeaconBlockBellatrixFromConsensus(&eth.BlindedBeaconBlockBellatrix{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
@@ -1748,7 +1748,7 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
expected, err := os.ReadFile("testdata/blinded-block-capella.json")
require.NoError(t, err)
b, err := structs.BlindedBeaconBlockCapellaFromConsensus(&eth.BlindedBeaconBlockCapella{
b, err := shared.BlindedBeaconBlockCapellaFromConsensus(&eth.BlindedBeaconBlockCapella{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

View File

@@ -1,7 +1,3 @@
package api
const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
)
const WebUrlPrefix = "/v2/validator/"

View File

@@ -1,50 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"block.go",
"conversions.go",
"conversions_block.go",
"conversions_state.go",
"endpoints_beacon.go",
"endpoints_blob.go",
"endpoints_builder.go",
"endpoints_config.go",
"endpoints_debug.go",
"endpoints_events.go",
"endpoints_lightclient.go",
"endpoints_node.go",
"endpoints_rewards.go",
"endpoints_validator.go",
"other.go",
"state.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/server/structs",
visibility = ["//visibility:public"],
deps = [
"//api/server:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["conversions_test.go"],
embed = [":go_default_library"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,26 +0,0 @@
package structs
import (
"testing"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestDepositSnapshotFromConsensus(t *testing.T) {
ds := &eth.DepositSnapshot{
Finalized: [][]byte{{0xde, 0xad, 0xbe, 0xef}, {0xca, 0xfe, 0xba, 0xbe}},
DepositRoot: []byte{0xab, 0xcd},
DepositCount: 12345,
ExecutionHash: []byte{0x12, 0x34},
ExecutionDepth: 67890,
}
res := DepositSnapshotFromConsensus(ds)
require.NotNil(t, res)
require.DeepEqual(t, []string{"0xdeadbeef", "0xcafebabe"}, res.Finalized)
require.Equal(t, "0xabcd", res.DepositRoot)
require.Equal(t, "12345", res.DepositCount)
require.Equal(t, "0x1234", res.ExecutionBlockHash)
require.Equal(t, "67890", res.ExecutionBlockHeight)
}

View File

@@ -1,14 +0,0 @@
package structs
type SidecarsResponse struct {
Data []*Sidecar `json:"data"`
}
type Sidecar struct {
Index string `json:"index"`
Blob string `json:"blob"`
SignedBeaconBlockHeader *SignedBeaconBlockHeader `json:"signed_block_header"`
KzgCommitment string `json:"kzg_commitment"`
KzgProof string `json:"kzg_proof"`
CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
}

View File

@@ -1,31 +0,0 @@
package structs
type LightClientBootstrapResponse struct {
Version string `json:"version"`
Data *LightClientBootstrap `json:"data"`
}
type LightClientBootstrap struct {
Header *BeaconBlockHeader `json:"header"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}
type LightClientUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
FinalizedHeader *BeaconBlockHeader `json:"finalized_header,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
FinalityBranch []string `json:"finality_branch,omitempty"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientUpdateWithVersion struct {
Version string `json:"version"`
Data *LightClientUpdate `json:"data"`
}
type LightClientUpdatesByRangeResponse struct {
Updates []*LightClientUpdateWithVersion `json:"updates"`
}

View File

@@ -557,9 +557,17 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
return s.cfg.ForkChoiceStore.Slot(root)
}
// inRegularSync queries the initial sync service to
// determine if the node is in regular sync or is still
// syncing to the head of the chain.
// inRegularSync applies the following heuristics to decide if the node is in
// regular sync mode vs init sync mode using only forkchoice.
// It checks that the highest received block is behind the current time by at least 2 epochs
// and that it was imported at least one epoch late if both of these
// tests pass then the node is in init sync. The caller of this function MUST
// have a lock on forkchoice
func (s *Service) inRegularSync() bool {
return s.cfg.SyncChecker.Synced()
currentSlot := s.CurrentSlot()
fc := s.cfg.ForkChoiceStore
if currentSlot-fc.HighestReceivedBlockSlot() < 2*params.BeaconConfig().SlotsPerEpoch {
return true
}
return fc.HighestReceivedBlockDelay() < params.BeaconConfig().SlotsPerEpoch
}
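The rewritten inRegularSync above drops the sync-checker dependency in favor of a forkchoice-only heuristic. A minimal self-contained sketch of that heuristic with hypothetical inputs, assuming 32 slots per epoch rather than reading Prysm's config:

package main

import "fmt"

const slotsPerEpoch = 32

// inRegularSync mirrors the heuristic described above: the node is treated as
// being in regular sync if the highest received block is less than two epochs
// behind the current slot, or if that block was imported less than one epoch
// after its slot; otherwise it is considered to still be in initial sync.
func inRegularSync(currentSlot, highestReceivedSlot, highestReceivedDelay uint64) bool {
	if currentSlot-highestReceivedSlot < 2*slotsPerEpoch {
		return true
	}
	return highestReceivedDelay < slotsPerEpoch
}

func main() {
	fmt.Println(inRegularSync(200, 190, 0))  // recent head block: true
	fmt.Println(inRegularSync(200, 100, 40)) // stale head imported late: false
}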

View File

@@ -593,3 +593,26 @@ func TestService_IsFinalized(t *testing.T) {
require.Equal(t, true, c.IsFinalized(ctx, br))
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}
func TestService_inRegularSync(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.Equal(t, false, c.inRegularSync())
c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
st, blkRoot, err = prepareForkchoiceState(ctx, 128, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.Equal(t, false, c.inRegularSync())
c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-5*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
require.Equal(t, true, c.inRegularSync())
c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
c.cfg.ForkChoiceStore.SetGenesisTime(uint64(time.Now().Unix()))
require.Equal(t, true, c.inRegularSync())
}

View File

@@ -182,10 +182,6 @@ var (
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
})
dataAvailWaitedTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "da_waited_time_milliseconds",
Help: "Total time spent waiting for a data availability check in ReceiveBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "process_attestations_milliseconds",

View File

@@ -198,10 +198,3 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
return nil
}
}
func WithSyncChecker(checker Checker) Option {
return func(s *Service) error {
s.cfg.SyncChecker = checker
return nil
}
}

View File

@@ -325,10 +325,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// The proposer indices cache takes the target root for the previous
// epoch as key
if e > 0 {
e = e - 1
}
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e-1)
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
return nil
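The hunk above keys the proposer indices cache by the target root of the previous epoch, as the comment explains. A rough sketch of that keying scheme with hypothetical types, not Prysm's actual cache implementation:

package main

import "fmt"

// Checkpoint is a hypothetical (epoch, root) key, mirroring the description above.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type proposerCache map[Checkpoint][]uint64

// set stores the proposer indices computed for epoch under the previous
// epoch's target checkpoint, matching the keying described in the comment.
func (c proposerCache) set(epoch uint64, prevTargetRoot [32]byte, indices []uint64) {
	c[Checkpoint{Epoch: epoch - 1, Root: prevTargetRoot}] = indices
}

// get looks the indices for epoch back up under the same previous-epoch key.
func (c proposerCache) get(epoch uint64, prevTargetRoot [32]byte) ([]uint64, bool) {
	v, ok := c[Checkpoint{Epoch: epoch - 1, Root: prevTargetRoot}]
	return v, ok
}

func main() {
	cache := proposerCache{}
	target := [32]byte{'a'}
	cache.set(5, target, []uint64{1, 2, 3})
	idx, ok := cache.get(5, target)
	fmt.Println(ok, idx) // true [1 2 3]
}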

View File

@@ -911,6 +911,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state older than Bellatrix, nil payload",
stateVersion: 1,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state older than Bellatrix, empty payload",
@@ -939,6 +940,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state is Bellatrix, nil payload",
stateVersion: 2,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state is Bellatrix, empty payload",
@@ -2046,9 +2048,7 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot, delay int64) {
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) + delay
newTime := time.Unix(time.Now().Unix()-offset, 0)
s.SetGenesisTime(newTime)
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(newTime.Unix()))
s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
}
func TestMissingIndices(t *testing.T) {

View File

@@ -122,7 +122,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
}
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
// Defragment the state before continuing block processing.
s.defragmentState(postState)

View File

@@ -93,13 +93,6 @@ type config struct {
BlockFetcher execution.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
}
// Checker is an interface used to determine if a node is in initial sync
// or regular sync.
type Checker interface {
Synced() bool
}
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v4/async/event"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -119,7 +118,6 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithDepositCache(dc),
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithSyncChecker(mock.MockChecker{}),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

View File

@@ -180,14 +180,6 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
return mon.feed
}
// MockChecker is a mock sync checker.
type MockChecker struct{}
// Synced returns true.
func (_ MockChecker) Synced() bool {
return true
}
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte) error {
if s.State == nil {

View File

@@ -2,7 +2,6 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
@@ -55,13 +54,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}

View File

@@ -796,7 +796,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
require.NoError(t, err)
// Mimic finalized deposit trie fetch.
// Mimick finalized deposit trie fetch.
fd, err := dc.FinalizedDeposits(context.Background())
require.NoError(t, err)
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)

View File

@@ -115,7 +115,6 @@ func (p *ProposerIndicesCache) IndicesFromCheckpoint(c forkchoicetypes.Checkpoin
root, ok := p.rootMap[c]
p.Unlock()
if !ok {
ProposerIndicesCacheMiss.Inc()
return emptyIndices, ok
}
return p.ProposerIndices(c.Epoch+1, root)

View File

@@ -37,69 +37,70 @@ func TestProposerCache_Set(t *testing.T) {
func TestProposerCache_CheckpointAndPrune(t *testing.T) {
cache := NewProposerIndicesCache()
indices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
root := [32]byte{'a'}
cpRoot := [32]byte{'b'}
copy(indices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
for i := 1; i < 10; i++ {
root := [32]byte{byte(i)}
cache.Set(primitives.Epoch(i), root, indices)
cpRoot := [32]byte{byte(i - 1)}
cache.SetCheckpoint(forkchoicetypes.Checkpoint{Epoch: primitives.Epoch(i - 1), Root: cpRoot}, root)
}
received, ok := cache.ProposerIndices(1, [32]byte{1})
received, ok := cache.ProposerIndices(1, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.ProposerIndices(4, [32]byte{4})
received, ok = cache.ProposerIndices(4, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.ProposerIndices(9, [32]byte{9})
received, ok = cache.ProposerIndices(9, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
cache.Prune(5)
emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
received, ok = cache.ProposerIndices(1, [32]byte{1})
received, ok = cache.ProposerIndices(1, root)
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.ProposerIndices(4, [32]byte{4})
received, ok = cache.ProposerIndices(4, root)
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.ProposerIndices(9, [32]byte{9})
received, ok = cache.ProposerIndices(9, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{0}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
}

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
@@ -610,7 +609,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -874,7 +873,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
}, big.NewInt(0))
}, 0)
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -643,10 +642,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
big.NewInt(0),
)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, 0)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
@@ -1064,7 +1060,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, 0)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {

View File

@@ -50,6 +50,7 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"main_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",

View File

@@ -20,8 +20,6 @@ import (
func TestAttestation_IsAggregator(t *testing.T) {
t.Run("aggregator", func(t *testing.T) {
helpers.ClearCache()
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, 0, 0)
require.NoError(t, err)
@@ -32,8 +30,6 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
t.Run("not aggregator", func(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
beaconState, privKeys := util.DeterministicGenesisState(t, 2048)
@@ -48,8 +44,6 @@ func TestAttestation_IsAggregator(t *testing.T) {
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
helpers.ClearCache()
// Create 10 committees
committeeCount := uint64(10)
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
@@ -210,8 +204,6 @@ func Test_ValidateAttestationTime(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
err := helpers.ValidateAttestationTime(tt.args.attSlot, tt.args.genesisTime,
params.BeaconConfig().MaximumGossipClockDisparityDuration())
if tt.wantedErr != "" {
@@ -224,8 +216,6 @@ func Test_ValidateAttestationTime(t *testing.T) {
}
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
helpers.ClearCache()
// Genesis was 6 epochs ago exactly.
offset := params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot * 6)
genesis := time.Now().Add(-1 * time.Second * time.Duration(offset))
@@ -295,8 +285,6 @@ func TestValidateNilAttestation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
if tt.errString != "" {
require.ErrorContains(t, tt.errString, helpers.ValidateNilAttestation(tt.attestation))
} else {
@@ -338,8 +326,6 @@ func TestValidateSlotTargetEpoch(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
if tt.errString != "" {
require.ErrorContains(t, tt.errString, helpers.ValidateSlotTargetEpoch(tt.attestation.Data))
} else {

View File

@@ -379,7 +379,7 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
if cp.Epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
slot, err := slots.EpochEnd(cp.Epoch)
slot, err := slots.EpochEnd(cp.Epoch - 1)
if err != nil {
return err
}

View File

@@ -21,8 +21,6 @@ import (
)
func TestComputeCommittee_WithoutCache(t *testing.T) {
ClearCache()
// Create 10 committees
committeeCount := uint64(10)
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
@@ -73,8 +71,6 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
func TestComputeCommittee_RegressionTest(t *testing.T) {
ClearCache()
indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
@@ -84,8 +80,6 @@ func TestComputeCommittee_RegressionTest(t *testing.T) {
}
func TestVerifyBitfieldLength_OK(t *testing.T) {
ClearCache()
bf := bitfield.Bitlist{0xFF, 0x01}
committeeSize := uint64(8)
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
@@ -97,7 +91,7 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
defer ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
@@ -109,7 +103,7 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
var activationEpoch primitives.Epoch
@@ -196,10 +190,10 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
@@ -215,8 +209,6 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -247,8 +239,6 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -269,7 +259,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
defer ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -390,9 +380,9 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
@@ -405,7 +395,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)
@@ -435,7 +425,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)

View File

@@ -60,8 +60,6 @@ func TestBlockRootAtSlot_CorrectBlockRoot(t *testing.T) {
}
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
helpers.ClearCache()
s.Slot = tt.stateSlot
state, err := state_native.InitializeFromProtoPhase0(s)
require.NoError(t, err)
@@ -112,8 +110,6 @@ func TestBlockRootAtSlot_OutOfBounds(t *testing.T) {
},
}
for _, tt := range tests {
helpers.ClearCache()
state.Slot = tt.stateSlot
s, err := state_native.InitializeFromProtoPhase0(state)
require.NoError(t, err)

View File

@@ -0,0 +1,13 @@
package helpers
import (
"os"
"testing"
)
// run ClearCache before each test to prevent cross-test side effects
func TestMain(m *testing.M) {
ClearCache()
code := m.Run()
os.Exit(code)
}

View File

@@ -40,8 +40,6 @@ func TestRandaoMix_OK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
require.NoError(t, err)
@@ -76,8 +74,6 @@ func TestRandaoMix_CopyOK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
require.NoError(t, err)
@@ -92,8 +88,6 @@ func TestRandaoMix_CopyOK(t *testing.T) {
}
func TestGenerateSeed_OK(t *testing.T) {
ClearCache()
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(randaoMixes); i++ {
intInBytes := make([]byte, 32)

View File

@@ -14,8 +14,6 @@ import (
)
func TestTotalBalance_OK(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
{EffectiveBalance: 32 * 1e9}, {EffectiveBalance: 40 * 1e9},
@@ -29,8 +27,6 @@ func TestTotalBalance_OK(t *testing.T) {
}
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
require.NoError(t, err)
@@ -51,8 +47,6 @@ func TestGetBalance_OK(t *testing.T) {
{i: 2, b: []uint64{0, 0, 0}},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
require.NoError(t, err)
assert.Equal(t, test.b[test.i], state.Balances()[test.i], "Incorrect Validator balance")
@@ -68,8 +62,6 @@ func TestTotalActiveBalance(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: 1})
@@ -83,6 +75,8 @@ func TestTotalActiveBalance(t *testing.T) {
}
func TestTotalActiveBal_ReturnMin(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
}{
@@ -91,8 +85,6 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: 1, ExitEpoch: 1})
@@ -106,6 +98,8 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
func TestTotalActiveBalance_WithCache(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
wantCount int
@@ -115,8 +109,6 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
{vCount: 10000, wantCount: 10000},
}
for _, test := range tests {
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: 1})
@@ -141,8 +133,6 @@ func TestIncreaseBalance_OK(t *testing.T) {
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},
@@ -167,8 +157,6 @@ func TestDecreaseBalance_OK(t *testing.T) {
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 3}},
@@ -181,8 +169,6 @@ func TestDecreaseBalance_OK(t *testing.T) {
}
func TestFinalityDelay(t *testing.T) {
ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
@@ -213,8 +199,6 @@ func TestFinalityDelay(t *testing.T) {
}
func TestIsInInactivityLeak(t *testing.T) {
ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
@@ -285,8 +269,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},

View File

@@ -13,8 +13,6 @@ import (
)
func TestShuffleList_InvalidValidatorCount(t *testing.T) {
ClearCache()
maxShuffleListSize = 20
list := make([]primitives.ValidatorIndex, 21)
if _, err := ShuffleList(list, [32]byte{123, 125}); err == nil {
@@ -25,8 +23,6 @@ func TestShuffleList_InvalidValidatorCount(t *testing.T) {
}
func TestShuffleList_OK(t *testing.T) {
ClearCache()
var list1 []primitives.ValidatorIndex
seed1 := [32]byte{1, 128, 12}
seed2 := [32]byte{2, 128, 12}
@@ -51,8 +47,6 @@ func TestShuffleList_OK(t *testing.T) {
}
func TestSplitIndices_OK(t *testing.T) {
ClearCache()
var l []uint64
numValidators := uint64(64000)
for i := uint64(0); i < numValidators; i++ {
@@ -67,8 +61,6 @@ func TestSplitIndices_OK(t *testing.T) {
}
func TestShuffleList_Vs_ShuffleIndex(t *testing.T) {
ClearCache()
var list []primitives.ValidatorIndex
listSize := uint64(1000)
seed := [32]byte{123, 42}
@@ -133,8 +125,6 @@ func BenchmarkShuffleList(b *testing.B) {
}
func TestShuffledIndex(t *testing.T) {
ClearCache()
var list []primitives.ValidatorIndex
listSize := uint64(399)
for i := primitives.ValidatorIndex(0); uint64(i) < listSize; i++ {
@@ -157,8 +147,6 @@ func TestShuffledIndex(t *testing.T) {
}
func TestSplitIndicesAndOffset_OK(t *testing.T) {
ClearCache()
var l []uint64
validators := uint64(64000)
for i := uint64(0); i < validators; i++ {

View File

@@ -18,7 +18,7 @@ import (
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -49,7 +49,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -77,7 +77,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -105,7 +105,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -135,8 +135,6 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
}
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -163,8 +161,6 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
}
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -192,7 +188,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -223,7 +219,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -264,7 +260,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -292,7 +288,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -322,8 +318,6 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
}
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -351,7 +345,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -378,8 +372,6 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
}
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 1,
})
@@ -396,8 +388,6 @@ func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
}
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
@@ -409,7 +399,7 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),

View File

@@ -179,6 +179,8 @@ func TestIsSlashableValidator_OK(t *testing.T) {
func TestBeaconProposerIndex_OK(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
defer ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
@@ -222,9 +224,9 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
},
}
defer ClearCache()
for _, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.slot))
result, err := BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Failed to get shard and committees at slot")
@@ -233,9 +235,9 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
func TestBeaconProposerIndex_BadState(t *testing.T) {
ClearCache()
params.SetupTestConfigCleanup(t)
ClearCache()
defer ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
@@ -266,8 +268,6 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
}
func TestComputeProposerIndex_Compatibility(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
@@ -309,16 +309,12 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
}
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
ClearCache()
epoch := primitives.Epoch(9999)
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
}
func TestActiveValidatorCount_Genesis(t *testing.T) {
ClearCache()
c := 1000
validators := make([]*ethpb.Validator, c)
for i := 0; i < len(validators); i++ {
@@ -352,6 +348,7 @@ func TestChurnLimit_OK(t *testing.T) {
{validatorCount: 1000000, wantedChurn: 15 /* validatorCount/churnLimitQuotient */},
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
}
defer ClearCache()
for _, test := range tests {
ClearCache()
@@ -385,6 +382,9 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
}
defer ClearCache()
for _, test := range tests {
ClearCache()
@@ -417,7 +417,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
// Test basic functionality of ActiveValidatorIndices without caching. This test will need to be
// rewritten when releasing some cache flag.
func TestActiveValidatorIndices(t *testing.T) {
//farFutureEpoch := params.BeaconConfig().FarFutureEpoch
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
type args struct {
state *ethpb.BeaconState
epoch primitives.Epoch
@@ -428,7 +428,7 @@ func TestActiveValidatorIndices(t *testing.T) {
want []primitives.ValidatorIndex
wantedErr string
}{
/*{
{
name: "all_active_epoch_10",
args: args{
state: &ethpb.BeaconState{
@@ -559,7 +559,7 @@ func TestActiveValidatorIndices(t *testing.T) {
epoch: 10,
},
want: []primitives.ValidatorIndex{0, 2, 3},
},*/
},
{
name: "impossible_zero_validators", // Regression test for issue #13051
args: args{
@@ -569,21 +569,22 @@ func TestActiveValidatorIndices(t *testing.T) {
},
epoch: 10,
},
wantedErr: "state has nil validator slice",
wantedErr: "no active validator indices",
},
}
defer ClearCache()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
require.NoError(t, err)
require.NoError(t, s.SetValidators(tt.args.state.Validators))
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
}
assert.DeepEqual(t, tt.want, got, "ActiveValidatorIndices()")
ClearCache()
})
}
}
@@ -684,8 +685,6 @@ func TestComputeProposerIndex(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
bState := &ethpb.BeaconState{Validators: tt.args.validators}
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
require.NoError(t, err)
@@ -718,8 +717,6 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
})
}
@@ -747,8 +744,6 @@ func TestIsIsEligibleForActivation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
@@ -787,8 +782,6 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
ClearCache()
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
@@ -812,8 +805,6 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
}
func TestProposerIndexFromCheckpoint(t *testing.T) {
ClearCache()
e := primitives.Epoch(2)
r := [32]byte{'a'}
root := [32]byte{'b'}

View File

@@ -202,14 +202,3 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
Root: bRoot,
}, nil
}
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// value defined:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
}
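
For a concrete feel of the numbers: assuming the mainnet spec values (not taken from this diff) of MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs and CHURN_LIMIT_QUOTIENT = 65536, the formula gives 256 + 65536/2 = 33024 epochs, roughly five months of history. The sketch below shows how a hypothetical caller might use the helper above to derive the oldest epoch it is still expected to serve; the package name and wrapper function are illustrative, not code from this diff.

package sketch

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// oldestServedEpoch returns the lowest epoch a node must still serve blocks for,
// given the current epoch. With mainnet values this is current - 33024, clamped at genesis.
func oldestServedEpoch(current primitives.Epoch) primitives.Epoch {
	minEpochs := helpers.MinEpochsForBlockRequests() // 256 + 65536/2 = 33024 on mainnet
	if current <= minEpochs {
		return 0
	}
	return current - minEpochs
}
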

View File

@@ -48,7 +48,6 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) {
t.Run(fmt.Sprintf("valCount: %d, avgBalance: %d", tt.valCount, tt.avgBalance), func(t *testing.T) {
// Reset committee cache - as we need to recalculate active validator set for each test.
helpers.ClearCache()
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig())
require.NoError(t, err)
assert.Equal(t, tt.want, got, "valCount: %v, avgBalance: %v", tt.valCount, tt.avgBalance)
@@ -178,8 +177,6 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
sr, _, e := tt.genWsCheckpoint()
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig())
if tt.wantedErr != "" {
@@ -250,8 +247,6 @@ func TestWeakSubjectivity_ParseWeakSubjectivityInputString(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
wsCheckpt, err := helpers.ParseWeakSubjectivityInputString(tt.input)
if tt.wantedErr != "" {
require.ErrorContains(t, tt.wantedErr, err)
@@ -286,21 +281,3 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
return beaconState
}
func TestMinEpochsForBlockRequests(t *testing.T) {
helpers.ClearCache()
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
// )
//
// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
}

View File

@@ -22,6 +22,9 @@ var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {

View File

@@ -13,11 +13,9 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/backup:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],

View File

@@ -11,11 +11,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -59,7 +57,7 @@ type ReadOnlyDatabase interface {
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -70,7 +68,6 @@ type NoHeadAccessDatabase interface {
DeleteBlock(ctx context.Context, root [32]byte) error
SaveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
@@ -109,10 +106,9 @@ type HeadAccessDatabase interface {
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// Support for checkpoint sync and backfill.
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"archived_point.go",
"backfill.go",
"backup.go",
"blocks.go",
"checkpoint.go",
@@ -49,7 +48,6 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
@@ -75,7 +73,6 @@ go_test(
name = "go_default_test",
srcs = [
"archived_point_test.go",
"backfill_test.go",
"backup_test.go",
"blocks_test.go",
"checkpoint_test.go",
@@ -110,7 +107,6 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/dbval:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",

View File

@@ -1,44 +0,0 @@
package kv
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
// SaveBackfillStatus encodes the given BackfillStatus protobuf struct and writes it to a single key in the db.
// This value is used by the backfill service to keep track of the range of blocks that need to be synced. It is also used by the
// code that serves blocks or regenerates states to keep track of what range of blocks are available.
func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bfb, err := proto.Marshal(bf)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillStatusKey, bfb)
})
}
// BackfillStatus retrieves the most recently saved version of the BackfillStatus protobuf struct.
// This is used to persist information about backfill status across restarts.
func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillStatus")
defer span.End()
bf := &dbval.BackfillStatus{}
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
bs := bucket.Get(backfillStatusKey)
if len(bs) == 0 {
return errors.Wrap(ErrNotFound, "BackfillStatus not found")
}
return proto.Unmarshal(bs, bf)
})
return bf, err
}

View File

@@ -1,35 +0,0 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
func TestBackfillRoundtrip(t *testing.T) {
db := setupDB(t)
b := &dbval.BackfillStatus{}
b.LowSlot = 23
b.LowRoot = bytesutil.PadTo([]byte("low"), 32)
b.LowParentRoot = bytesutil.PadTo([]byte("parent"), 32)
m, err := proto.Marshal(b)
require.NoError(t, err)
ub := &dbval.BackfillStatus{}
require.NoError(t, proto.Unmarshal(m, ub))
require.Equal(t, b.LowSlot, ub.LowSlot)
require.DeepEqual(t, b.LowRoot, ub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot)
ctx := context.Background()
require.NoError(t, db.SaveBackfillStatus(ctx, b))
dbub, err := db.BackfillStatus(ctx)
require.NoError(t, err)
require.Equal(t, b.LowSlot, dbub.LowSlot)
require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, dbub.LowParentRoot)
}

View File

@@ -70,6 +70,25 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})
return root, err
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -273,95 +292,55 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
defer span.End()
robs := make([]blocks.ROBlock, len(blks))
for i := range blks {
rb, err := blocks.NewROBlock(blks[i])
// Performing marshaling, hashing, and indexing outside the bolt transaction
// to minimize the time we hold the DB lock.
blockRoots := make([][]byte, len(blks))
encodedBlocks := make([][]byte, len(blks))
indicesForBlocks := make([]map[string][]byte, len(blks))
for i, blk := range blks {
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrapf(err, "failed to make an ROBlock for a block in SaveBlocks")
return err
}
robs[i] = rb
}
return s.SaveROBlocks(ctx, robs, true)
}
type blockBatchEntry struct {
root []byte
block interfaces.ReadOnlySignedBeaconBlock
enc []byte
updated bool
indices map[string][]byte
}
func prepareBlockBatch(blks []blocks.ROBlock, shouldBlind bool) ([]blockBatchEntry, error) {
batch := make([]blockBatchEntry, len(blks))
for i := range blks {
batch[i].root, batch[i].block = blks[i].RootSlice(), blks[i].ReadOnlySignedBeaconBlock
batch[i].indices = blockIndices(batch[i].block.Block().Slot(), batch[i].block.Block().ParentRoot())
if shouldBlind {
blinded, err := batch[i].block.ToBlinded()
if err != nil {
if !errors.Is(err, blocks.ErrUnsupportedVersion) {
return nil, errors.Wrapf(err, "could not convert block to blinded format for root %#x", batch[i].root)
}
// Pre-deneb blocks give ErrUnsupportedVersion; use the full block already in the batch entry.
} else {
batch[i].block = blinded
}
}
enc, err := encodeBlock(batch[i].block)
enc, err := s.marshalBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "failed to encode block for root %#x", batch[i].root)
return err
}
batch[i].enc = enc
blockRoots[i] = blockRoot[:]
encodedBlocks[i] = enc
indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
indicesForBlocks[i] = indicesByBucket
}
return batch, nil
}
func (s *Store) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
shouldBlind, err := s.shouldSaveBlinded(ctx)
saveBlinded, err := s.shouldSaveBlinded(ctx)
if err != nil {
return err
}
// Precompute expensive values outside the db transaction.
batch, err := prepareBlockBatch(blks, shouldBlind)
if err != nil {
return errors.Wrap(err, "failed to encode all blocks in batch for saving to the db")
}
err = s.db.Update(func(tx *bolt.Tx) error {
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
for i := range batch {
if exists := bkt.Get(batch[i].root); exists != nil {
for i, blk := range blks {
if existingBlock := bkt.Get(blockRoots[i]); existingBlock != nil {
continue
}
if err := bkt.Put(batch[i].root, batch[i].enc); err != nil {
return errors.Wrapf(err, "could write block to db with root %#x", batch[i].root)
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
}
if err := updateValueForIndices(ctx, batch[i].indices, batch[i].root, tx); err != nil {
return errors.Wrapf(err, "could not update DB indices for root %#x", batch[i].root)
if saveBlinded {
blindedBlock, err := blk.ToBlinded()
if err != nil {
if !errors.Is(err, blocks.ErrUnsupportedVersion) {
return err
}
} else {
blk = blindedBlock
}
}
s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
return err
}
batch[i].updated = true
}
return nil
})
if !cache {
return err
}
for i := range batch {
if batch[i].updated {
s.blockCache.Set(string(batch[i].root), batch[i].block, int64(len(batch[i].enc)))
}
}
return err
}
// blockIndices takes in a beacon block and returns
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func blockIndices(slot primitives.Slot, parentRoot [32]byte) map[string][]byte {
return map[string][]byte{
string(blockSlotIndicesBucket): bytesutil.SlotToBytesBigEndian(slot),
string(blockParentRootIndicesBucket): parentRoot[:],
}
}
// SaveHeadBlockRoot to the db.
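
Both sides of this hunk follow the same broad pattern: do the expensive work (hashing, SSZ marshaling, index construction) before opening the bolt write transaction, so the global DB lock is held only for the cheap Put calls. Below is a minimal, generic sketch of that pattern; the saveAll function, its bucket argument, and the use of snappy here are illustrative assumptions, not code from this diff.

package sketch

import (
	"github.com/golang/snappy"
	bolt "go.etcd.io/bbolt"
)

// saveAll compresses every value up front, then holds the write lock only for
// the Put calls, skipping keys that already exist (as SaveBlocks / SaveROBlocks do).
func saveAll(db *bolt.DB, bucket []byte, keys, values [][]byte) error {
	encoded := make([][]byte, len(values))
	for i := range values {
		encoded[i] = snappy.Encode(nil, values[i]) // expensive step, done outside the tx
	}
	return db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(bucket)
		for i := range keys {
			if existing := bkt.Get(keys[i]); existing != nil {
				continue // already stored, skip
			}
			if err := bkt.Put(keys[i], encoded[i]); err != nil {
				return err
			}
		}
		return nil
	})
}
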
@@ -438,6 +417,17 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
})
}
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}
// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance
@@ -736,6 +726,31 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([
return [][32]byte{}, nil
}
// createBlockIndicesFromBlock takes in a beacon block and returns
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyBeaconBlock) map[string][]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
defer span.End()
indicesByBucket := make(map[string][]byte)
// Every index has a unique bucket for fast, binary-search
// range scans for filtering across keys.
buckets := [][]byte{
blockSlotIndicesBucket,
}
indices := [][]byte{
bytesutil.SlotToBytesBigEndian(block.Slot()),
}
buckets = append(buckets, blockParentRootIndicesBucket)
parentRoot := block.ParentRoot()
indices = append(indices, parentRoot[:])
for i := 0; i < len(buckets); i++ {
indicesByBucket[string(buckets[i])] = indices[i]
}
return indicesByBucket
}
// createBlockFiltersFromIndices takes in filter criteria and returns
// a map with a single key-value pair: "block-parent-root-indices" -> parentRoot (array of bytes).
//
@@ -823,44 +838,74 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
return blocks.NewSignedBeaconBlock(rawBlock)
}
func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
key, err := keyForBlock(blk)
func (s *Store) marshalBlock(
ctx context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
shouldBlind, err := s.shouldSaveBlinded(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not determine version encoding key for block")
return nil, err
}
enc, err := blk.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal block")
if shouldBlind {
return marshalBlockBlinded(ctx, blk)
}
dbfmt := make([]byte, len(key)+len(enc))
if len(key) > 0 {
copy(dbfmt, key)
}
copy(dbfmt[len(key):], enc)
return snappy.Encode(nil, dbfmt), nil
return marshalBlockFull(ctx, blk)
}
func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
// Encodes a full beacon block to the DB with its associated key.
func marshalBlockFull(
_ context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
var encodedBlock []byte
var err error
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, err
}
switch blk.Version() {
case version.Deneb:
if blk.IsBlinded() {
return denebBlindKey, nil
}
return denebKey, nil
return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
case version.Capella:
if blk.IsBlinded() {
return capellaBlindKey, nil
}
return capellaKey, nil
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
if blk.IsBlinded() {
return bellatrixBlindKey, nil
}
return bellatrixKey, nil
return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
case version.Altair:
return altairKey, nil
return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
case version.Phase0:
return nil, nil
return snappy.Encode(nil, encodedBlock), nil
default:
return nil, errors.New("unknown block version")
}
}
// Encodes a blinded beacon block with its associated key.
// If the block does not support blinding, we then encode it as a full
// block with its associated key by calling marshalBlockFull.
func marshalBlockBlinded(
ctx context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
blindedBlock, err := blk.ToBlinded()
if err != nil {
switch {
case errors.Is(err, blocks.ErrUnsupportedVersion):
return marshalBlockFull(ctx, blk)
default:
return nil, errors.Wrap(err, "could not convert block to blinded format")
}
}
encodedBlock, err := blindedBlock.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
default:
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
}
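
Both encoders above produce the same on-disk layout, snappy(versionKey || SSZ(block)), with phase0 blocks carrying no key prefix. The sketch below shows how a reader could branch on that prefix after decompression; it is a hypothetical illustration inside package kv (reusing the existing key constants, and assuming imports "bytes" and "github.com/golang/snappy"), not the actual unmarshalBlock implementation.

// sketchVersionOf decompresses a stored block record and reports which
// fork-version prefix it carries, returning the remaining SSZ bytes.
// Blinded prefixes are checked first in case a full-block key happens to be a
// prefix of its blinded counterpart.
func sketchVersionOf(enc []byte) (string, []byte, error) {
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		return "", nil, err
	}
	switch {
	case bytes.HasPrefix(dec, denebBlindKey):
		return "deneb (blinded)", dec[len(denebBlindKey):], nil
	case bytes.HasPrefix(dec, denebKey):
		return "deneb", dec[len(denebKey):], nil
	case bytes.HasPrefix(dec, capellaBlindKey):
		return "capella (blinded)", dec[len(capellaBlindKey):], nil
	case bytes.HasPrefix(dec, capellaKey):
		return "capella", dec[len(capellaKey):], nil
	case bytes.HasPrefix(dec, bellatrixBlindKey):
		return "bellatrix (blinded)", dec[len(bellatrixBlindKey):], nil
	case bytes.HasPrefix(dec, bellatrixKey):
		return "bellatrix", dec[len(bellatrixKey):], nil
	case bytes.HasPrefix(dec, altairKey):
		return "altair", dec[len(altairKey):], nil
	default:
		return "phase0", dec, nil // phase0 blocks have no version prefix
	}
}
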

View File

@@ -126,6 +126,23 @@ var blockTests = []struct {
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
var expected [32]byte
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := primitives.Slot(20)

View File

@@ -21,8 +21,3 @@ var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
var errNotConnectedToFinalized = errors.New("unable to finalize backfill blocks, finalized parent_root does not match")

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -164,83 +163,6 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return bkt.Put(previousFinalizedCheckpointKey, enc)
}
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
// given finalized child root. This is needed to update the finalized index during backfill, because the usual
// updateFinalizedBlockRoots has assumptions that are incompatible with backfill processing.
func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillFinalizedIndex")
defer span.End()
if len(blocks) == 0 {
return errEmptyBlockSlice
}
fbrs := make([]*ethpb.FinalizedBlockRootContainer, len(blocks))
encs := make([][]byte, len(blocks))
for i := range blocks {
pr := blocks[i].Block().ParentRoot()
fbrs[i] = &ethpb.FinalizedBlockRootContainer{
ParentRoot: pr[:],
// ChildRoot: will be filled in on the next iteration when we look at the descendent block.
}
if i == 0 {
continue
}
if blocks[i-1].Root() != blocks[i].Block().ParentRoot() {
return errors.Wrapf(errIncorrectBlockParent, "previous root=%#x, slot=%d; child parent_root=%#x, root=%#x, slot=%d",
blocks[i-1].Root(), blocks[i-1].Block().Slot(), blocks[i].Block().ParentRoot(), blocks[i].Root(), blocks[i].Block().Slot())
}
// We know the previous index is the parent of this one thanks to the assertion above,
// so we can set the ChildRoot of the previous value to the root of the current value.
fbrs[i-1].ChildRoot = blocks[i].RootSlice()
// Now that the value for fbrs[i-1] is complete, perform encoding here to minimize time in Update,
// which holds the global db lock.
penc, err := encode(ctx, fbrs[i-1])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i-1] = penc
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
child := bkt.Get(finalizedChildRoot[:])
if len(child) == 0 {
return errFinalizedChildNotFound
}
fcc := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, child, fcc); err != nil {
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", finalizedChildRoot)
}
// Ensure that the existing finalized chain descends from the new segment.
if !bytes.Equal(fcc.ParentRoot, blocks[len(blocks)-1].RootSlice()) {
return errors.Wrapf(errNotConnectedToFinalized, "finalized block root container for root=%#x has parent_root=%#x, not %#x",
finalizedChildRoot, fcc.ParentRoot, blocks[len(blocks)-1].RootSlice())
}
// Update the finalized index with entries for each block in the new segment.
for i := range fbrs {
if err := bkt.Put(blocks[i].RootSlice(), encs[i]); err != nil {
return err
}
}
return nil
})
}
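
The index written by BackfillFinalizedIndex is effectively a doubly linked list keyed by block root: each entry records that block's parent root and child root. For a backfilled segment A <- B <- C whose descendant is the previously finalized block, the stored entries look roughly as sketched below (illustrative roots, not real data):

// finalizedBlockRootsIndexBucket contents after a successful call (sketch):
//   rootA -> &ethpb.FinalizedBlockRootContainer{ParentRoot: parentOfA[:], ChildRoot: rootB[:]}
//   rootB -> &ethpb.FinalizedBlockRootContainer{ParentRoot: rootA[:],     ChildRoot: rootC[:]}
//   rootC -> &ethpb.FinalizedBlockRootContainer{ParentRoot: rootB[:],     ChildRoot: finalizedChildRoot[:]}
// The transaction only commits if finalizedChildRoot already has an entry whose
// ParentRoot equals rootC, i.e. the new segment connects to the existing chain.
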
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
// A beacon block root contained exists in this index if it is considered finalized and canonical.
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are

View File

@@ -1,7 +1,6 @@
package kv
import (
"bytes"
"context"
"testing"
@@ -15,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
bolt "go.etcd.io/bbolt"
)
var genesisBlockRoot = bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'})
@@ -236,64 +234,3 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
}
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{}, [32]byte{}), errEmptyBlockSlice)
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 66, [32]byte{}))
require.NoError(t, err)
// set up existing finalized block
ebpr := blks[64].Block().ParentRoot()
ebr := blks[64].Root()
chldr := blks[65].Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendent root that really doesn't exist
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds
require.NoError(t, db.BackfillFinalizedIndex(ctx, blks, ebr))
for i := range blks {
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
encfr := bkt.Get(blks[i].RootSlice())
require.Equal(t, true, len(encfr) > 0)
fr := &ethpb.FinalizedBlockRootContainer{}
require.NoError(t, decode(ctx, encfr, fr))
require.Equal(t, 32, len(fr.ParentRoot))
require.Equal(t, 32, len(fr.ChildRoot))
pr := blks[i].Block().ParentRoot()
require.Equal(t, true, bytes.Equal(fr.ParentRoot, pr[:]))
if i > 0 {
require.Equal(t, true, bytes.Equal(fr.ParentRoot, blks[i-1].RootSlice()))
}
if i < len(blks)-1 {
require.DeepEqual(t, fr.ChildRoot, blks[i+1].RootSlice())
}
if i == len(blks)-1 {
require.DeepEqual(t, fr.ChildRoot, ebr[:])
}
return nil
}))
}
}

View File

@@ -1,7 +1,6 @@
package kv
import (
"bytes"
"context"
"fmt"
"testing"
@@ -117,25 +116,13 @@ func Test_setupBlockStorageType(t *testing.T) {
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
wrappedBlinded, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
// Compare retrieved value by root, and marshaled bytes.
mSrc, err := wrappedBlinded.MarshalSSZ()
wrappedBlinded, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
mTgt, err := retrievedBlk.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(mSrc, mTgt))
rSrc, err := wrappedBlinded.Block().HashTreeRoot()
require.NoError(t, err)
rTgt, err := retrievedBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, rSrc, rTgt)
require.DeepEqual(t, wrappedBlinded, retrievedBlk)
})
t.Run("existing database with full blocks type should continue storing full blocks", func(t *testing.T) {
store := setupDB(t)
@@ -168,21 +155,10 @@ func Test_setupBlockStorageType(t *testing.T) {
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
// Compare retrieved value by root, and marshaled bytes.
mSrc, err := wrappedBlock.MarshalSSZ()
require.NoError(t, err)
mTgt, err := retrievedBlk.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(mSrc, mTgt))
rTgt, err := retrievedBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, root, rTgt)
require.DeepEqual(t, wrappedBlock, retrievedBlk)
})
t.Run("existing database with blinded blocks type should error if user enables full blocks feature flag", func(t *testing.T) {
store := setupDB(t)

View File

@@ -61,8 +61,8 @@ var (
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

View File

@@ -893,7 +893,6 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot primitives.Slot)
//
// 3.) state with current finalized root
// 4.) unfinalized States
// 5.) not origin root
func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB. CleanUpDirtyStates")
defer span.End()
@@ -908,11 +907,6 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
}
deletedRoots := make([][32]byte, 0)
oRoot, err := s.OriginCheckpointBlockRoot(ctx)
if err != nil {
return err
}
err = s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
return bkt.ForEach(func(k, v []byte) error {
@@ -920,31 +914,15 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
return ctx.Err()
}
root := bytesutil.ToBytes32(v)
finalizedChkpt := bytesutil.ToBytes32(f.Root) == bytesutil.ToBytes32(v)
slot := bytesutil.BytesToSlotBigEndian(k)
mod := slot % slotsPerArchivedPoint
nonFinalized := slot > finalizedSlot
if mod == 0 {
return nil
// The following conditions cover 1, 2, 3 and 4 above.
if mod != 0 && mod <= slotsPerArchivedPoint-slotsPerArchivedPoint/3 && !finalizedChkpt && !nonFinalized {
deletedRoots = append(deletedRoots, bytesutil.ToBytes32(v))
}
if mod > slotsPerArchivedPoint-slotsPerArchivedPoint/3 {
return nil
}
if bytesutil.ToBytes32(f.Root) == root {
return nil
}
if slot > finalizedSlot {
return nil
}
if oRoot == root {
return nil
}
deletedRoots = append(deletedRoots, root)
return nil
})
})
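
Restating the deletion rule that both versions of this hunk implement (the origin-root check exists only on the side that loads oRoot) as a single predicate. This is an illustrative sketch written as if inside package kv, assuming the consensus-types/primitives import; it is not code from the diff.

// shouldDelete reports whether a saved state may be cleaned up; the names
// mirror the variables used in CleanUpDirtyStates.
func shouldDelete(slot, finalizedSlot, slotsPerArchivedPoint primitives.Slot, root, finalizedRoot, originRoot [32]byte) bool {
	mod := slot % slotsPerArchivedPoint
	switch {
	case mod == 0: // archived point, always kept
		return false
	case mod > slotsPerArchivedPoint-slotsPerArchivedPoint/3: // close to the next archived point, kept
		return false
	case root == finalizedRoot: // state at the current finalized checkpoint, kept
		return false
	case slot > finalizedSlot: // unfinalized state, kept
		return false
	case root == originRoot: // origin checkpoint state, kept (only checked on one side of this hunk)
		return false
	default:
		return true
	}
}
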

View File

@@ -3,7 +3,6 @@ package kv
import (
"context"
"encoding/binary"
"math/big"
"math/rand"
"strconv"
"testing"
@@ -100,7 +99,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
}, big.NewInt(0))
}, 0)
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
return st
@@ -125,7 +124,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
}, big.NewInt(0))
}, 0)
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
return st
@@ -676,7 +675,6 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
bRoots := make([][32]byte, 0)
slotsPerArchivedPoint := primitives.Slot(128)
@@ -722,7 +720,6 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
b := util.NewBeaconBlock()
@@ -744,35 +741,6 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
require.Equal(t, true, db.HasState(context.Background(), genesisRoot))
}
func TestStore_CleanUpDirtyStates_OriginRoot(t *testing.T) {
db := setupDB(t)
genesisState, err := util.NewBeaconState()
require.NoError(t, err)
r := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), r))
require.NoError(t, db.SaveState(context.Background(), genesisState, r))
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
b := util.NewBeaconBlock()
b.Block.Slot = i
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(i))
require.NoError(t, db.SaveState(context.Background(), st, r))
}
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), r))
require.NoError(t, db.CleanUpDirtyStates(context.Background(), params.BeaconConfig().SlotsPerEpoch))
require.Equal(t, true, db.HasState(context.Background(), r))
}
func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
db := setupDB(t)
@@ -781,7 +749,6 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
var unfinalizedRoots [][32]byte
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -18,6 +17,18 @@ import (
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}
cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -39,24 +50,11 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()
// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
pr := blk.ParentRoot()
bf := &dbval.BackfillStatus{
LowSlot: uint64(wblk.Block().Slot()),
LowRoot: blockRoot[:],
LowParentRoot: pr[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")

View File

@@ -7,21 +7,10 @@ package slasherkv
// it easy to scan for keys that have a certain shard number as a prefix and return those
// corresponding attestations.
var (
// key: (encoded) ValidatorIndex
// value: (encoded) Epoch
attestedEpochsByValidator = []byte("attested-epochs-by-validator")
// key: attestation SigningRoot
// value: (encoded + compressed) IndexedAttestation
attestationRecordsBucket = []byte("attestation-records")
// key: (encoded) Target Epoch + (encoded) ValidatorIndex
// value: attestation SigningRoot
// Slasher buckets.
attestedEpochsByValidator = []byte("attested-epochs-by-validator")
attestationRecordsBucket = []byte("attestation-records")
attestationDataRootsBucket = []byte("attestation-data-roots")
// key: Slot+ValidatorIndex
// value: (encoded) SignedBlockHeaderWrapper
proposalRecordsBucket = []byte("proposal-records")
slasherChunksBucket = []byte("slasher-chunks")
proposalRecordsBucket = []byte("proposal-records")
slasherChunksBucket = []byte("slasher-chunks")
)

View File

@@ -29,90 +29,72 @@ const (
// LastEpochWrittenForValidators given a list of validator indices returns the latest
// epoch we have recorded the validators writing data for.
func (s *Store) LastEpochWrittenForValidators(
ctx context.Context, validatorIndexes []primitives.ValidatorIndex,
ctx context.Context, validatorIndices []primitives.ValidatorIndex,
) ([]*slashertypes.AttestedEpochForValidator, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
defer span.End()
attestedEpochs := make([]*slashertypes.AttestedEpochForValidator, 0)
encodedIndexes := make([][]byte, len(validatorIndexes))
for i, validatorIndex := range validatorIndexes {
encodedIndexes[i] = encodeValidatorIndex(validatorIndex)
encodedIndices := make([][]byte, len(validatorIndices))
for i, valIdx := range validatorIndices {
encodedIndices[i] = encodeValidatorIndex(valIdx)
}
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(attestedEpochsByValidator)
for i, encodedIndex := range encodedIndexes {
for i, encodedIndex := range encodedIndices {
var epoch primitives.Epoch
epochBytes := bkt.Get(encodedIndex)
if epochBytes != nil {
if err := epoch.UnmarshalSSZ(epochBytes); err != nil {
return err
}
}
attestedEpochs = append(attestedEpochs, &slashertypes.AttestedEpochForValidator{
ValidatorIndex: validatorIndexes[i],
ValidatorIndex: validatorIndices[i],
Epoch: epoch,
})
}
return nil
})
return attestedEpochs, err
}
// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
// of validator indices has attested to.
func (s *Store) SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
defer span.End()
const batchSize = 10000
encodedIndexes := make([][]byte, 0, len(epochByValIndex))
encodedEpochs := make([][]byte, 0, len(epochByValIndex))
for valIndex, epoch := range epochByValIndex {
encodedIndices := make([][]byte, 0, len(epochByValidator))
encodedEpochs := make([][]byte, 0, len(epochByValidator))
for valIdx, epoch := range epochByValidator {
if ctx.Err() != nil {
return ctx.Err()
}
encodedEpoch, err := epoch.MarshalSSZ()
if err != nil {
return err
}
encodedIndexes = append(encodedIndexes, encodeValidatorIndex(valIndex))
encodedIndices = append(encodedIndices, encodeValidatorIndex(valIdx))
encodedEpochs = append(encodedEpochs, encodedEpoch)
}
// The list of validators might be too massive for boltdb to handle in a single transaction,
// so instead we split it into batches and write each batch.
for i := 0; i < len(encodedIndexes); i += batchSize {
batchSize := 10000
for i := 0; i < len(encodedIndices); i += batchSize {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.db.Update(func(tx *bolt.Tx) error {
if ctx.Err() != nil {
return ctx.Err()
}
bkt := tx.Bucket(attestedEpochsByValidator)
minimum := i + batchSize
if minimum > len(encodedIndexes) {
minimum = len(encodedIndexes)
min := i + batchSize
if min > len(encodedIndices) {
min = len(encodedIndices)
}
for j, encodedIndex := range encodedIndexes[i:minimum] {
for j, encodedIndex := range encodedIndices[i:min] {
if ctx.Err() != nil {
return ctx.Err()
}
@@ -120,106 +102,79 @@ func (s *Store) SaveLastEpochsWrittenForValidators(
return err
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
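
The loop above exists because writing an entry for every validator in a single bolt transaction could be enormous, so the updates are committed in fixed-size chunks. A minimal sketch of that chunking shape follows, with a hypothetical writeBatch callback standing in for the per-batch bolt Update; this is not code from the diff.

// forEachBatch invokes writeBatch once per chunk of at most batchSize items.
func forEachBatch(total, batchSize int, writeBatch func(start, end int) error) error {
	for start := 0; start < total; start += batchSize {
		end := start + batchSize
		if end > total {
			end = total
		}
		if err := writeBatch(start, end); err != nil {
			return err
		}
	}
	return nil
}
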
// CheckAttesterDoubleVotes retrieves any slashable double votes that exist
// for a series of input attestations with respect to the database.
// CheckAttesterDoubleVotes retries any slashable double votes that exist
// for a series of input attestations.
func (s *Store) CheckAttesterDoubleVotes(
ctx context.Context, attestations []*slashertypes.IndexedAttestationWrapper,
) ([]*slashertypes.AttesterDoubleVote, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.CheckAttesterDoubleVotes")
defer span.End()
doubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
mu := sync.Mutex{}
doubleVotesMu := sync.Mutex{}
eg, egctx := errgroup.WithContext(ctx)
for _, attestation := range attestations {
for _, att := range attestations {
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
attToProcess := attestation
// Process each attestation in parallel.
attToProcess := att
// process every attestation parallelly.
eg.Go(func() error {
err := s.db.View(func(tx *bolt.Tx) error {
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
// Check if there is signing root in the database for this combination
// of validator index and target epoch.
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
validatorEpochKey := append(encEpoch, encIdx...)
attRecordsKey := signingRootsBkt.Get(validatorEpochKey)
// An attestation record key is comprised of a signing root (32 bytes).
if len(attRecordsKey) < attestationRecordKeySize {
// If there is no signing root for this combination,
// then there is no double vote. We can continue to the next validator.
continue
}
// Retrieve the attestation record corresponding to the signing root
// from the database.
encExistingAttRecord := attRecordsBkt.Get(attRecordsKey)
if encExistingAttRecord == nil {
continue
}
existingSigningRoot := bytesutil.ToBytes32(attRecordsKey[:signingRootSize])
if existingSigningRoot == attToProcess.SigningRoot {
continue
if existingSigningRoot != attToProcess.SigningRoot {
existingAttRecord, err := decodeAttestationRecord(encExistingAttRecord)
if err != nil {
return err
}
slashAtt := &slashertypes.AttesterDoubleVote{
ValidatorIndex: primitives.ValidatorIndex(valIdx),
Target: attToProcess.IndexedAttestation.Data.Target.Epoch,
PrevAttestationWrapper: existingAttRecord,
AttestationWrapper: attToProcess,
}
localDoubleVotes = append(localDoubleVotes, slashAtt)
}
// There is a double vote.
existingAttRecord, err := decodeAttestationRecord(encExistingAttRecord)
if err != nil {
return err
}
// Build the proof of double vote.
slashAtt := &slashertypes.AttesterDoubleVote{
ValidatorIndex: primitives.ValidatorIndex(valIdx),
Target: attToProcess.IndexedAttestation.Data.Target.Epoch,
PrevAttestationWrapper: existingAttRecord,
AttestationWrapper: attToProcess,
}
localDoubleVotes = append(localDoubleVotes, slashAtt)
}
// If any routine is cancelled, then cancel this routine too.
// if any routine is cancelled, then cancel this routine too
select {
case <-egctx.Done():
return egctx.Err()
default:
}
// If there are any double votes in this attestation, add it to the global double votes.
// if there are any doible votes in this attestation, add it to the global double votes
if len(localDoubleVotes) > 0 {
mu.Lock()
defer mu.Unlock()
doubleVotesMu.Lock()
defer doubleVotesMu.Unlock()
doubleVotes = append(doubleVotes, localDoubleVotes...)
}
return nil
})
return err
})
}
return doubleVotes, eg.Wait()
}
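
For readers untangling the interleaved old and new lines above, the lookup scheme is the same in both versions and can be summarized as follows (a sketch of the on-disk schema inside package slasherkv, derived from the code shown in this hunk):

// Double-vote lookup used by CheckAttesterDoubleVotes:
//   key in attestationDataRootsBucket = encodeTargetEpoch(target) || encodeValidatorIndex(valIdx)
//   value                             = the 32-byte signing root of the attestation recorded earlier
// A double vote exists when a signing root is already stored for the same
// (target epoch, validator index) pair and differs from the incoming wrapper's
// SigningRoot; the previous attestation is then loaded from
// attestationRecordsBucket, keyed by that stored signing root, to build the proof.
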
@@ -256,8 +211,6 @@ func (s *Store) AttestationRecordForValidator(
}
// SaveAttestationRecordsForValidators saves attestation records for the specified indices.
// If multiple attestations are provided for the same validator index + target epoch combination,
// then only the first one is (arbitrarily) saved in the `attestationDataRootsBucket` bucket.
func (s *Store) SaveAttestationRecordsForValidators(
ctx context.Context,
attestations []*slashertypes.IndexedAttestationWrapper,
@@ -266,40 +219,37 @@ func (s *Store) SaveAttestationRecordsForValidators(
defer span.End()
encodedTargetEpoch := make([][]byte, len(attestations))
encodedRecords := make([][]byte, len(attestations))
for i, attestation := range attestations {
encEpoch := encodeTargetEpoch(attestation.IndexedAttestation.Data.Target.Epoch)
value, err := encodeAttestationRecord(attestation)
encodedIndices := make([][]byte, len(attestations))
for i, att := range attestations {
encEpoch := encodeTargetEpoch(att.IndexedAttestation.Data.Target.Epoch)
value, err := encodeAttestationRecord(att)
if err != nil {
return err
}
indicesBytes := make([]byte, len(att.IndexedAttestation.AttestingIndices)*8)
for _, idx := range att.IndexedAttestation.AttestingIndices {
encodedIdx := encodeValidatorIndex(primitives.ValidatorIndex(idx))
indicesBytes = append(indicesBytes, encodedIdx...)
}
encodedIndices[i] = indicesBytes
encodedTargetEpoch[i] = encEpoch
encodedRecords[i] = value
}
return s.db.Update(func(tx *bolt.Tx) error {
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
for i := len(attestations) - 1; i >= 0; i-- {
attestation := attestations[i]
if err := attRecordsBkt.Put(attestation.SigningRoot[:], encodedRecords[i]); err != nil {
for i, att := range attestations {
if err := attRecordsBkt.Put(att.SigningRoot[:], encodedRecords[i]); err != nil {
return err
}
for _, valIdx := range attestation.IndexedAttestation.AttestingIndices {
for _, valIdx := range att.IndexedAttestation.AttestingIndices {
encIdx := encodeValidatorIndex(primitives.ValidatorIndex(valIdx))
key := append(encodedTargetEpoch[i], encIdx...)
if err := signingRootsBkt.Put(key, attestation.SigningRoot[:]); err != nil {
if err := signingRootsBkt.Put(key, att.SigningRoot[:]); err != nil {
return err
}
}
}
return nil
})
}
@@ -364,60 +314,43 @@ func (s *Store) SaveSlasherChunks(
}
// CheckDoubleBlockProposals takes in a list of proposals and for each,
// checks if there already exists a proposal at the same slot+validatorIndex combination.
// If so, it checks if the existing signing root is not-empty and is different than
// the incoming proposal signing root.
// If so, it returns a double block proposal object.
// checks if there already exists a proposal at the same slot+validatorIndex combination. If so,
// We check if the existing signing root is not-empty and is different than the incoming
// proposal signing root. If so, we return a double block proposal object.
func (s *Store) CheckDoubleBlockProposals(
ctx context.Context, incomingProposals []*slashertypes.SignedBlockHeaderWrapper,
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) ([]*ethpb.ProposerSlashing, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
defer span.End()
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(incomingProposals))
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposals))
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the proposal records bucket
bkt := tx.Bucket(proposalRecordsBucket)
for _, incomingProposal := range incomingProposals {
// Build the key corresponding to this slot + validator index combination
for _, proposal := range proposals {
key, err := keyForValidatorProposal(
incomingProposal.SignedBeaconBlockHeader.Header.Slot,
incomingProposal.SignedBeaconBlockHeader.Header.ProposerIndex,
proposal.SignedBeaconBlockHeader.Header.Slot,
proposal.SignedBeaconBlockHeader.Header.ProposerIndex,
)
if err != nil {
return err
}
// Retrieve the existing proposal record from the database
encExistingProposalWrapper := bkt.Get(key)
// If there is no existing proposal record (empty result), then there is no double proposal.
// We can continue to the next proposal.
if len(encExistingProposalWrapper) < signingRootSize {
continue
}
// Compare the proposal signing root in the DB with the incoming proposal signing root.
// If they differ, we have a double proposal.
existingSigningRoot := bytesutil.ToBytes32(encExistingProposalWrapper[:signingRootSize])
if existingSigningRoot != incomingProposal.SigningRoot {
if existingSigningRoot != proposal.SigningRoot {
existingProposalWrapper, err := decodeProposalRecord(encExistingProposalWrapper)
if err != nil {
return err
}
proposerSlashings = append(proposerSlashings, &ethpb.ProposerSlashing{
Header_1: existingProposalWrapper.SignedBeaconBlockHeader,
Header_2: incomingProposal.SignedBeaconBlockHeader,
Header_2: proposal.SignedBeaconBlockHeader,
})
}
}
return nil
})
return proposerSlashings, err
}
@@ -451,20 +384,14 @@ func (s *Store) BlockProposalForValidator(
// SaveBlockProposals takes in a list of block proposals and saves them to our
// proposal records bucket in the database.
// If multiple proposals are provided for the same slot + validatorIndex combination,
// then only the last one is saved in the database.
func (s *Store) SaveBlockProposals(
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
defer span.End()
encodedKeys := make([][]byte, len(proposals))
encodedProposals := make([][]byte, len(proposals))
// Loop over all proposals to encode keys and proposals themselves.
for i, proposal := range proposals {
// Encode the key for this proposal.
key, err := keyForValidatorProposal(
proposal.SignedBeaconBlockHeader.Header.Slot,
proposal.SignedBeaconBlockHeader.Header.ProposerIndex,
@@ -472,29 +399,20 @@ func (s *Store) SaveBlockProposals(
if err != nil {
return err
}
// Encode the proposal itself.
enc, err := encodeProposalRecord(proposal)
if err != nil {
return err
}
encodedKeys[i] = key
encodedProposals[i] = enc
}
// All proposals are saved into the DB in a single transaction.
return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the proposal records bucket.
bkt := tx.Bucket(proposalRecordsBucket)
// Save all proposals.
for i := range proposals {
if err := bkt.Put(encodedKeys[i], encodedProposals[i]); err != nil {
return err
}
}
return nil
})
}
@@ -554,7 +472,7 @@ func suffixForAttestationRecordsKey(key, encodedValidatorIndex []byte) bool {
return bytes.Equal(encIdx, encodedValidatorIndex)
}
// keyForValidatorProposal returns a disk key for a validator proposal, including a slot+validatorIndex as a byte slice.
// Disk key for a validator proposal, including a slot+validatorIndex as a byte slice.
func keyForValidatorProposal(slot primitives.Slot, proposerIndex primitives.ValidatorIndex) ([]byte, error) {
encSlot, err := slot.MarshalSSZ()
if err != nil {
@@ -594,55 +512,37 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
return chunk, nil
}
// Encode attestation record to bytes.
// The output encoded attestation record consists of the signing root concatenated with the compressed attestation record.
// Decode attestation record from bytes.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
}
// Encode attestation.
encodedAtt, err := att.IndexedAttestation.MarshalSSZ()
if err != nil {
return nil, err
}
// Compress attestation.
compressedAtt := snappy.Encode(nil, encodedAtt)
return append(att.SigningRoot[:], compressedAtt...), nil
}
// Decode attestation record from bytes.
// The input encoded attestation record consists of the signing root concatenated with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < signingRootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", signingRootSize, len(encoded))
return nil, fmt.Errorf("wrong length for encoded attestation record, want 32, got %d", len(encoded))
}
// Decompress attestation.
signingRoot := encoded[:signingRootSize]
decodedAtt := &ethpb.IndexedAttestation{}
decodedAttBytes, err := snappy.Decode(nil, encoded[signingRootSize:])
if err != nil {
return nil, err
}
// Decode attestation.
decodedAtt := &ethpb.IndexedAttestation{}
if err := decodedAtt.UnmarshalSSZ(decodedAttBytes); err != nil {
return nil, err
}
// Decode signing root.
signingRootBytes := encoded[:signingRootSize]
signingRoot := bytesutil.ToBytes32(signingRootBytes)
// Return decoded attestation.
attestation := &slashertypes.IndexedAttestationWrapper{
return &slashertypes.IndexedAttestationWrapper{
IndexedAttestation: decodedAtt,
SigningRoot: signingRoot,
}
return attestation, nil
SigningRoot: bytesutil.ToBytes32(signingRoot),
}, nil
}
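
Both helpers above use the same layout: a 32-byte signing root followed by the snappy-compressed SSZ bytes. A minimal round-trip sketch of that layout, with an opaque payload standing in for the SSZ-encoded attestation:

package main

import (
	"errors"
	"fmt"

	"github.com/golang/snappy"
)

const signingRootSize = 32

// encodeRecord prepends the signing root to the snappy-compressed payload.
func encodeRecord(signingRoot [32]byte, payload []byte) []byte {
	compressed := snappy.Encode(nil, payload)
	return append(signingRoot[:], compressed...)
}

// decodeRecord splits the signing root back out and decompresses the payload.
func decodeRecord(encoded []byte) ([32]byte, []byte, error) {
	var root [32]byte
	if len(encoded) < signingRootSize {
		return root, nil, errors.New("encoded record shorter than signing root")
	}
	copy(root[:], encoded[:signingRootSize])
	payload, err := snappy.Decode(nil, encoded[signingRootSize:])
	if err != nil {
		return root, nil, err
	}
	return root, payload, nil
}

func main() {
	root := [32]byte{0xaa}
	enc := encodeRecord(root, []byte("ssz-encoded attestation bytes"))
	gotRoot, gotPayload, err := decodeRecord(enc)
	fmt.Println(gotRoot == root, string(gotPayload), err)
}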
func encodeProposalRecord(blkHdr *slashertypes.SignedBlockHeaderWrapper) ([]byte, error) {

View File

@@ -113,7 +113,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime
var numOfBlocks uint64
numOfBlocks := uint64(0)
estimatedBlk := cursorNum.Uint64()
maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
// Terminate if we can't find an acceptable block after

View File

@@ -260,7 +260,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToWei(result.Value))
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, false, err
}
@@ -273,7 +273,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToWei(result.Value))
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, false, err
}
@@ -493,12 +493,7 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash
return result, nil
}
err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByHashV1, executionBlockHashes)
if err != nil {
return nil, handleRPCError(err)
}
if len(result) != len(executionBlockHashes) {
return nil, fmt.Errorf("mismatch of payloads retrieved from the execution client: %d vs %d", len(result), len(executionBlockHashes))
}
for i, item := range result {
if item == nil {
result[i] = &pb.ExecutionPayloadBodyV1{
@@ -507,7 +502,7 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash
}
}
}
return result, nil
return result, handleRPCError(err)
}
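
The hunk above shows two treatments of an incomplete response from the execution client: erroring on a length mismatch, or backfilling nil entries with empty bodies so callers never have to nil-check individual elements. A minimal sketch combining both steps, with a simplified placeholder in place of pb.ExecutionPayloadBodyV1:

package main

import "fmt"

// payloadBody is a placeholder for the execution payload body type returned by the engine API.
type payloadBody struct {
	Transactions [][]byte
	Withdrawals  []string
}

// normalizeBodies verifies the response length matches the request and replaces nil
// entries with empty bodies.
func normalizeBodies(requested int, result []*payloadBody) ([]*payloadBody, error) {
	if len(result) != requested {
		return nil, fmt.Errorf("mismatch of payloads retrieved from the execution client: %d vs %d", len(result), requested)
	}
	for i, item := range result {
		if item == nil {
			result[i] = &payloadBody{
				Transactions: make([][]byte, 0),
				Withdrawals:  make([]string, 0),
			}
		}
	}
	return result, nil
}

func main() {
	bodies, err := normalizeBodies(2, []*payloadBody{nil, {Transactions: [][]byte{{0x01}}}})
	fmt.Println(len(bodies), bodies[0] != nil, err) // 2 true <nil>
}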
// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
@@ -640,8 +635,6 @@ func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executio
return fullPayloadFromPayloadBody(header, bdy, version)
}
// This method assumes that the provided execution hashes are all valid and part of the
// canonical chain.
func (s *Service) retrievePayloadsFromExecutionHashes(
ctx context.Context,
executionHashes []common.Hash,
@@ -741,7 +734,7 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
@@ -770,7 +763,7 @@ func fullPayloadFromExecutionBlock(
Withdrawals: block.Withdrawals,
BlobGasUsed: bgu,
ExcessBlobGas: ebg,
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
@@ -818,7 +811,7 @@ func fullPayloadFromPayloadBody(
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
@@ -847,7 +840,7 @@ func fullPayloadFromPayloadBody(
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
}

View File

@@ -127,7 +127,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
@@ -476,7 +476,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
@@ -490,7 +490,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.NoError(t, err)
@@ -518,7 +518,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
@@ -532,7 +532,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
@@ -560,7 +560,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
@@ -574,7 +574,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
@@ -602,7 +602,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
@@ -616,7 +616,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
@@ -912,73 +912,6 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
require.NoError(t, err)
require.DeepEqual(t, payload, got.Proto())
})
t.Run("handles invalid response from EL", func(t *testing.T) {
fix := fixtures()
payload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
jsonPayload := make(map[string]interface{})
tx := gethtypes.NewTransaction(
0,
common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
big.NewInt(0), 0, big.NewInt(0),
nil,
)
txs := []*gethtypes.Transaction{tx}
encodedBinaryTxs := make([][]byte, 1)
var err error
encodedBinaryTxs[0], err = txs[0].MarshalBinary()
require.NoError(t, err)
payload.Transactions = encodedBinaryTxs
jsonPayload["transactions"] = []hexutil.Bytes{encodedBinaryTxs[0]}
wrappedPayload, err := blocks.WrappedExecutionPayload(payload)
require.NoError(t, err)
header, err := blocks.PayloadToHeader(wrappedPayload)
require.NoError(t, err)
bellatrixBlock := util.NewBlindedBeaconBlockBellatrix()
wanted := util.NewBeaconBlockBellatrix()
wanted.Block.Slot = 1
// Make sure block hash is the zero hash.
bellatrixBlock.Block.Body.ExecutionPayloadHeader.BlockHash = make([]byte, 32)
bellatrixBlock.Block.Slot = 1
wrappedEmpty, err := blocks.NewSignedBeaconBlock(bellatrixBlock)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": []map[string]interface{}{},
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
service := &Service{}
service.rpcClient = rpcClient
blindedBlock := util.NewBlindedBeaconBlockBellatrix()
blindedBlock.Block.Body.ExecutionPayloadHeader = header
wrapped, err := blocks.NewSignedBeaconBlock(blindedBlock)
require.NoError(t, err)
copiedWrapped, err := wrapped.Copy()
require.NoError(t, err)
_, err = service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wrappedEmpty, wrapped, copiedWrapped})
require.ErrorContains(t, "mismatch of payloads retrieved from the execution client", err)
})
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
@@ -1604,7 +1537,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, big.NewInt(0))
}, 0)
require.NoError(t, err)
return p
},
@@ -1612,7 +1545,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, big.NewInt(0))
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
@@ -1665,7 +1598,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, big.NewInt(0))
}, 0)
require.NoError(t, err)
return p
},
@@ -1673,7 +1606,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, big.NewInt(0))
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
@@ -2233,7 +2166,7 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
bRoot := [32]byte{}
copy(bRoot[:], "hash")
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot, bRoot})
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot})
require.NoError(t, err)
require.Equal(t, 2, len(results))

View File

@@ -479,10 +479,10 @@ func (s *Service) handleETH1FollowDistance() {
// If the last requested block has not changed,
// we do not request batched logs as this means there are no new
// logs for the execution service to process. Also it is a potential
// logs for the powchain service to process. Also it is a potential
// failure condition as would mean we have not respected the protocol threshold.
if s.latestEth1Data.LastRequestedBlock == s.latestEth1Data.BlockHeight {
log.WithField("lastBlockNumber", s.latestEth1Data.LastRequestedBlock).Error("Beacon node is not respecting the follow distance. EL client is syncing.")
log.Error("Beacon node is not respecting the follow distance")
return
}
if err := s.requestBatchedHeadersAndLogs(ctx); err != nil {
@@ -753,7 +753,7 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
} else {
if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
"Remove the --%s flag to resume normal operations.", features.DisableEIP4881.Name)
"Run with the --%s flag to resume normal operations.", features.EnableEIP4881.Name)
}
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
}

View File

@@ -23,6 +23,7 @@ go_library(
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",

View File

@@ -14,6 +14,7 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -62,14 +63,14 @@ func (e *EngineClient) ForkchoiceUpdated(
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, big.NewInt(int64(e.BlockValue)))
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, false, err
}
return ed, e.BlobsBundle, e.BuilderOverride, nil
}
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, big.NewInt(int64(e.BlockValue)))
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, false, err
}
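
The hunk above swaps a *big.Int wei block value for an integer gwei value when wrapping the payload. A minimal sketch of the wei/gwei conversion, assuming the gwei type is a plain uint64 and 1 gwei = 10^9 wei; the Gwei type below is a stand-in, not the actual math.Gwei:

package main

import (
	"fmt"
	"math/big"
)

// Gwei is a stand-in for the uint64-based gwei type referenced above.
type Gwei uint64

// weiToGwei converts a wei value to gwei, truncating any sub-gwei remainder.
func weiToGwei(wei *big.Int) Gwei {
	if wei == nil {
		return 0
	}
	return Gwei(new(big.Int).Div(wei, big.NewInt(1_000_000_000)).Uint64())
}

// gweiToWei converts a gwei value back to wei.
func gweiToWei(g Gwei) *big.Int {
	return new(big.Int).Mul(new(big.Int).SetUint64(uint64(g)), big.NewInt(1_000_000_000))
}

func main() {
	fmt.Println(weiToGwei(big.NewInt(2_500_000_001))) // 2
	fmt.Println(gweiToWei(2))                         // 2000000000
}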

View File

@@ -89,6 +89,16 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
return nil
}
// getVotingSource returns the voting source if we were to make an FFG vote for this node in the currentEpoch
func (n *Node) getVotingSource(currentEpoch primitives.Epoch) primitives.Epoch {
// If the block is from a past epoch, then return the unrealized justified epoch
if slots.ToEpoch(n.slot) < currentEpoch {
return n.unrealizedJustifiedEpoch
}
// Otherwise return the justified epoch
return n.justifiedEpoch
}
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
@@ -96,10 +106,8 @@ func (n *Node) viableForHead(justifiedEpoch, currentEpoch primitives.Epoch) bool
if justifiedEpoch == 0 {
return true
}
// We use n.justifiedEpoch as the voting source because:
// 1. if this node is from current epoch, n.justifiedEpoch is the realized justification epoch.
// 2. if this node is from a previous epoch, n.justifiedEpoch has already been updated to the unrealized justification epoch.
return n.justifiedEpoch == justifiedEpoch || n.justifiedEpoch+2 >= currentEpoch
votingSource := n.getVotingSource(currentEpoch)
return votingSource == justifiedEpoch || votingSource+2 >= currentEpoch
}
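
The two variants above differ only in which justification epoch acts as the voting source: the realized justified epoch for blocks from the current epoch, or the unrealized justified epoch for older blocks. A minimal self-contained sketch of that rule with plain integer epochs; the node type and field names below are simplified stand-ins:

package main

import "fmt"

// node carries only the fields needed for the head-viability check.
type node struct {
	slotEpoch                uint64 // epoch the block's slot belongs to
	justifiedEpoch           uint64
	unrealizedJustifiedEpoch uint64
}

// votingSource returns the justification epoch an FFG vote for this node would use in
// currentEpoch: the unrealized epoch for blocks from past epochs, otherwise the
// realized justified epoch.
func (n node) votingSource(currentEpoch uint64) uint64 {
	if n.slotEpoch < currentEpoch {
		return n.unrealizedJustifiedEpoch
	}
	return n.justifiedEpoch
}

// viableForHead mirrors the filter above: the voting source must match the store's
// justified epoch or be at most two epochs behind the current epoch.
func (n node) viableForHead(justifiedEpoch, currentEpoch uint64) bool {
	if justifiedEpoch == 0 {
		return true
	}
	vs := n.votingSource(currentEpoch)
	return vs == justifiedEpoch || vs+2 >= currentEpoch
}

func main() {
	n := node{slotEpoch: 3, justifiedEpoch: 1, unrealizedJustifiedEpoch: 2}
	fmt.Println(n.viableForHead(2, 4)) // true: the unrealized justified epoch matches
}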
func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch) bool {

View File

@@ -144,11 +144,15 @@ func TestNode_ViableForHead(t *testing.T) {
}{
{&Node{}, 0, true},
{&Node{}, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 2}, 3, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 2}, 4, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 3}, 4, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1, unrealizedJustifiedEpoch: 1}, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1, unrealizedJustifiedEpoch: 1}, 2, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1, unrealizedJustifiedEpoch: 2}, 2, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1, unrealizedJustifiedEpoch: 2}, 3, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1, unrealizedJustifiedEpoch: 3}, 3, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 2, unrealizedJustifiedEpoch: 3}, 3, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 2, unrealizedJustifiedEpoch: 2}, 4, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 2, unrealizedJustifiedEpoch: 3}, 4, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 3, unrealizedJustifiedEpoch: 4}, 4, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, 5)

View File

@@ -40,7 +40,6 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
@@ -48,7 +47,6 @@ go_library(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",

View File

@@ -42,7 +42,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
@@ -50,7 +49,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
regularsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/checkpoint"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/genesis"
initialsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync"
@@ -115,12 +113,10 @@ type BeaconNode struct {
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
blobRetentionEpochs primitives.Epoch
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
}
// New creates a new node instance, sets up configuration options, and registers
@@ -193,7 +189,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.initialSyncComplete = make(chan struct{})
beacon.syncChecker = &initialsync.SyncChecker{}
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
@@ -220,23 +215,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
bf, err := backfill.NewService(ctx, bfs, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
@@ -251,6 +233,11 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
@@ -547,8 +534,8 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBlocker, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithAvailableBlocker(bfs)}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, fc, opts...)
cp, err := b.db.FinalizedCheckpoint(ctx)
@@ -676,7 +663,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithBlobStorage(b.BlobStorage),
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -770,7 +756,6 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
opts := []initialsync.Option{
initialsync.WithVerifierWaiter(b.verifyInitWaiter),
initialsync.WithSyncChecker(b.syncChecker),
}
is := initialsync.NewService(b.ctx, &initialsync.Config{
DB: b.db,
@@ -896,6 +881,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
BLSChangesPool: b.blsToExecPool,
SlashingChecker: slasherService,
SyncCommitteeObjectPool: b.syncCommitteePool,
ExecutionChainService: web3Service,
ExecutionChainInfoFetcher: web3Service,

View File

@@ -27,7 +27,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime"
"github.com/prysmaticlabs/prysm/v4/runtime/interop"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
@@ -92,30 +91,6 @@ func TestNodeStart_Ok(t *testing.T) {
require.LogsContain(t, hook, "Starting beacon node")
}
func TestNodeStart_SyncChecker(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
set := flag.NewFlagSet("test", 0)
set.String("datadir", tmp, "node data directory")
set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
ctx, cancel := newCliContextWithCancel(&app, set)
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),
WithExecutionChainOptions([]execution.Option{}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
require.NoError(t, err)
go func() {
node.Start()
}()
time.Sleep(3 * time.Second)
assert.NotNil(t, node.syncChecker.Svc)
node.Close()
require.LogsContain(t, hook, "Starting beacon node")
}
func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
numValidators := uint64(1)
hook := logTest.NewGlobal()

View File

@@ -83,7 +83,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
if err != nil {
totalLength, err := math.AddInt(
len(params.BeaconConfig().MessageDomainInvalidSnappy),
len(params.BeaconConfig().MessageDomainValidSnappy),
len(topicLenBytes),
topicLen,
len(pmsg.Data),

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"assigner.go",
"log.go",
"status.go",
],
@@ -13,10 +12,8 @@ go_library(
"//cmd:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -31,7 +28,6 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -40,7 +36,6 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"assigner_test.go",
"benchmark_test.go",
"peers_test.go",
"status_test.go",

View File

@@ -1,78 +0,0 @@
package peers
import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/sirupsen/logrus"
)
// FinalizedCheckpointer describes the minimum capability that Assigner needs from forkchoice.
// That is, the ability to retrieve the latest finalized checkpoint to help with peer evaluation.
type FinalizedCheckpointer interface {
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
}
// NewAssigner assists in the correct construction of an Assigner by code in other packages,
// ensuring that all the important private member fields are given values.
// The FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
// Peers that report an older finalized checkpoint are filtered out.
func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {
return &Assigner{
ps: s,
fc: fc,
}
}
// Assigner uses the "BestFinalized" peer scoring method to pick the next-best peer to receive rpc requests.
type Assigner struct {
ps *Status
fc FinalizedCheckpointer
}
// ErrInsufficientSuitable is a sentinel error, signaling that a peer couldn't be assigned because there are currently
// not enough peers that match our selection criteria to serve rpc requests. It is the responsibility of the caller to
// look for this error and continue to try calling Assign with appropriate backoff logic.
var ErrInsufficientSuitable = errors.New("no suitable peers")
func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Warn("Unable to assign peer while suitable peers < required ")
return nil, ErrInsufficientSuitable
}
return peers, nil
}
// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
best, err := a.freshPeers()
if err != nil {
return nil, err
}
return pickBest(busy, n, best), nil
}
func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
ps := make([]peer.ID, 0, n)
for _, p := range best {
if len(ps) == n {
return ps
}
if !busy[p] {
ps = append(ps, p)
}
}
return ps
}

View File

@@ -1,114 +0,0 @@
package peers
import (
"fmt"
"testing"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestPickBest(t *testing.T) {
best := testPeerIds(10)
cases := []struct {
name string
busy map[peer.ID]bool
n int
best []peer.ID
expected []peer.ID
}{
{
name: "",
n: 0,
},
{
name: "none busy",
n: 1,
expected: best[0:1],
},
{
name: "all busy except last",
n: 1,
busy: testBusyMap(best[0 : len(best)-1]),
expected: best[len(best)-1:],
},
{
name: "all busy except i=5",
n: 1,
busy: testBusyMap(append(append([]peer.ID{}, best[0:5]...), best[6:]...)),
expected: []peer.ID{best[5]},
},
{
name: "all busy - 0 results",
n: 1,
busy: testBusyMap(best),
},
{
name: "first half busy",
n: 5,
busy: testBusyMap(best[0:5]),
expected: best[5:],
},
{
name: "back half busy",
n: 5,
busy: testBusyMap(best[5:]),
expected: best[0:5],
},
{
name: "pick all ",
n: 10,
expected: best,
},
{
name: "none available",
n: 10,
best: []peer.ID{},
},
{
name: "not enough",
n: 10,
best: best[0:1],
expected: best[0:1],
},
{
name: "not enough, some busy",
n: 10,
best: best[0:6],
busy: testBusyMap(best[0:5]),
expected: best[5:6],
},
}
for _, c := range cases {
name := fmt.Sprintf("n=%d", c.n)
if c.name != "" {
name += " " + c.name
}
t.Run(name, func(t *testing.T) {
if c.best == nil {
c.best = best
}
pb := pickBest(c.busy, c.n, c.best)
require.Equal(t, len(c.expected), len(pb))
for i := range c.expected {
require.Equal(t, c.expected[i], pb[i])
}
})
}
}
func testBusyMap(b []peer.ID) map[peer.ID]bool {
m := make(map[peer.ID]bool)
for i := range b {
m[b[i]] = true
}
return m
}
func testPeerIds(n int) []peer.ID {
pids := make([]peer.ID, n)
for i := range pids {
pids[i] = peer.ID(fmt.Sprintf("%d", i))
}
return pids
}

View File

@@ -44,6 +44,7 @@ go_library(
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",

View File

@@ -9,15 +9,14 @@ go_library(
"handlers_validator.go",
"log.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
@@ -76,9 +75,7 @@ go_test(
deps = [
"//api:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -93,6 +90,7 @@ go_test(
"//beacon-chain/operations/voluntaryexits/mock:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",

View File

@@ -14,8 +14,6 @@ import (
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
corehelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
@@ -78,7 +76,7 @@ func (s *Server) getBlock(ctx context.Context, w http.ResponseWriter, blk interf
httputil.HandleError(w, "Could not get block: "+err.Error(), http.StatusInternalServerError)
return
}
resp := &structs.GetBlockResponse{Data: v2Resp.Data}
resp := &GetBlockResponse{Data: v2Resp.Data}
httputil.WriteJson(w, resp)
}
@@ -123,7 +121,7 @@ func (s *Server) getBlockV2(ctx context.Context, w http.ResponseWriter, blk inte
}
finalized := s.FinalizationFetcher.IsFinalized(ctx, blkRoot)
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error)) handled {
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error)) handled {
result, err := get(ctx, blk)
if result != nil {
result.Finalized = finalized
@@ -223,7 +221,7 @@ func (s *Server) getBlindedBlock(ctx context.Context, w http.ResponseWriter, blk
}
finalized := s.FinalizationFetcher.IsFinalized(ctx, blkRoot)
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error)) handled {
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error)) handled {
result, err := get(ctx, blk)
if result != nil {
result.Finalized = finalized
@@ -292,7 +290,7 @@ func (s *Server) getBlindedBlockSSZ(ctx context.Context, w http.ResponseWriter,
httputil.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
}
func (*Server) getBlockPhase0(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (*Server) getBlockPhase0(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
consensusBlk, err := blk.PbPhase0Block()
if err != nil {
return nil, err
@@ -300,22 +298,22 @@ func (*Server) getBlockPhase0(_ context.Context, blk interfaces.ReadOnlySignedBe
if consensusBlk == nil {
return nil, errNilBlock
}
respBlk := structs.SignedBeaconBlockFromConsensus(consensusBlk)
respBlk := shared.SignedBeaconBlockFromConsensus(consensusBlk)
jsonBytes, err := json.Marshal(respBlk.Message)
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Phase0),
ExecutionOptimistic: false,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
}, nil
}
func (*Server) getBlockAltair(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (*Server) getBlockAltair(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
consensusBlk, err := blk.PbAltairBlock()
if err != nil {
return nil, err
@@ -323,22 +321,22 @@ func (*Server) getBlockAltair(_ context.Context, blk interfaces.ReadOnlySignedBe
if consensusBlk == nil {
return nil, errNilBlock
}
respBlk := structs.SignedBeaconBlockAltairFromConsensus(consensusBlk)
respBlk := shared.SignedBeaconBlockAltairFromConsensus(consensusBlk)
jsonBytes, err := json.Marshal(respBlk.Message)
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Altair),
ExecutionOptimistic: false,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
}, nil
}
func (s *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (s *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
consensusBlk, err := blk.PbBellatrixBlock()
if err != nil {
// ErrUnsupportedField means that we have another block type
@@ -374,7 +372,7 @@ func (s *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlyS
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
respBlk, err := structs.SignedBeaconBlockBellatrixFromConsensus(consensusBlk)
respBlk, err := shared.SignedBeaconBlockBellatrixFromConsensus(consensusBlk)
if err != nil {
return nil, err
}
@@ -382,17 +380,17 @@ func (s *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlyS
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Bellatrix),
ExecutionOptimistic: isOptimistic,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
}, nil
}
func (s *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (s *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
consensusBlk, err := blk.PbCapellaBlock()
if err != nil {
// ErrUnsupportedField means that we have another block type
@@ -428,7 +426,7 @@ func (s *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySig
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
respBlk, err := structs.SignedBeaconBlockCapellaFromConsensus(consensusBlk)
respBlk, err := shared.SignedBeaconBlockCapellaFromConsensus(consensusBlk)
if err != nil {
return nil, err
}
@@ -436,17 +434,17 @@ func (s *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySig
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Capella),
ExecutionOptimistic: isOptimistic,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
}, nil
}
func (s *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (s *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
consensusBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
@@ -482,7 +480,7 @@ func (s *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySigne
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
respBlk, err := structs.SignedBeaconBlockDenebFromConsensus(consensusBlk)
respBlk, err := shared.SignedBeaconBlockDenebFromConsensus(consensusBlk)
if err != nil {
return nil, err
}
@@ -490,10 +488,10 @@ func (s *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySigne
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Deneb),
ExecutionOptimistic: isOptimistic,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
@@ -635,7 +633,7 @@ func (s *Server) getBlockDenebSSZ(ctx context.Context, blk interfaces.ReadOnlySi
return sszData, nil
}
func (s *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (s *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
blindedConsensusBlk, err := blk.PbBlindedBellatrixBlock()
if err != nil {
// ErrUnsupportedField means that we have another block type
@@ -671,7 +669,7 @@ func (s *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.Re
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
respBlk, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(blindedConsensusBlk)
respBlk, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(blindedConsensusBlk)
if err != nil {
return nil, err
}
@@ -679,17 +677,17 @@ func (s *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.Re
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Bellatrix),
ExecutionOptimistic: isOptimistic,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
}, nil
}
func (s *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (s *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
blindedConsensusBlk, err := blk.PbBlindedCapellaBlock()
if err != nil {
// ErrUnsupportedField means that we have another block type
@@ -725,7 +723,7 @@ func (s *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.Read
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
respBlk, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(blindedConsensusBlk)
respBlk, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(blindedConsensusBlk)
if err != nil {
return nil, err
}
@@ -733,17 +731,17 @@ func (s *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.Read
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Capella),
ExecutionOptimistic: isOptimistic,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
}, nil
}
func (s *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*structs.GetBlockV2Response, error) {
func (s *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
blindedConsensusBlk, err := blk.PbBlindedDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
@@ -779,7 +777,7 @@ func (s *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOn
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
respBlk, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(blindedConsensusBlk)
respBlk, err := shared.SignedBlindedBeaconBlockDenebFromConsensus(blindedConsensusBlk)
if err != nil {
return nil, err
}
@@ -787,10 +785,10 @@ func (s *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOn
if err != nil {
return nil, err
}
return &structs.GetBlockV2Response{
return &GetBlockV2Response{
Version: version.String(version.Deneb),
ExecutionOptimistic: isOptimistic,
Data: &structs.SignedBlock{
Data: &SignedBlock{
Message: jsonBytes,
Signature: respBlk.Signature,
},
@@ -918,9 +916,9 @@ func (s *Server) GetBlockAttestations(w http.ResponseWriter, r *http.Request) {
}
consensusAtts := blk.Block().Body().Attestations()
atts := make([]*structs.Attestation, len(consensusAtts))
atts := make([]*shared.Attestation, len(consensusAtts))
for i, att := range consensusAtts {
atts[i] = structs.AttFromConsensus(att)
atts[i] = shared.AttFromConsensus(att)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
@@ -933,7 +931,7 @@ func (s *Server) GetBlockAttestations(w http.ResponseWriter, r *http.Request) {
return
}
resp := &structs.GetBlockAttestationsResponse{
resp := &GetBlockAttestationsResponse{
Data: atts,
ExecutionOptimistic: isOptimistic,
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
@@ -1128,7 +1126,7 @@ func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter,
var consensusBlock *eth.GenericSignedBeaconBlock
var denebBlock *structs.SignedBlindedBeaconBlockDeneb
var denebBlock *shared.SignedBlindedBeaconBlockDeneb
if err = unmarshalStrict(body, &denebBlock); err == nil {
consensusBlock, err = denebBlock.ToGeneric()
if err == nil {
@@ -1149,7 +1147,7 @@ func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter,
return
}
var capellaBlock *structs.SignedBlindedBeaconBlockCapella
var capellaBlock *shared.SignedBlindedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
consensusBlock, err = capellaBlock.ToGeneric()
if err == nil {
@@ -1170,7 +1168,7 @@ func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter,
return
}
var bellatrixBlock *structs.SignedBlindedBeaconBlockBellatrix
var bellatrixBlock *shared.SignedBlindedBeaconBlockBellatrix
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
consensusBlock, err = bellatrixBlock.ToGeneric()
if err == nil {
@@ -1191,7 +1189,7 @@ func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter,
return
}
var altairBlock *structs.SignedBeaconBlockAltair
var altairBlock *shared.SignedBeaconBlockAltair
if err = unmarshalStrict(body, &altairBlock); err == nil {
consensusBlock, err = altairBlock.ToGeneric()
if err == nil {
@@ -1212,7 +1210,7 @@ func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter,
return
}
var phase0Block *structs.SignedBeaconBlock
var phase0Block *shared.SignedBeaconBlock
if err = unmarshalStrict(body, &phase0Block); err == nil {
consensusBlock, err = phase0Block.ToGeneric()
if err == nil {
@@ -1423,7 +1421,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
var consensusBlock *eth.GenericSignedBeaconBlock
var denebBlockContents *structs.SignedBeaconBlockContentsDeneb
var denebBlockContents *shared.SignedBeaconBlockContentsDeneb
if err = unmarshalStrict(body, &denebBlockContents); err == nil {
consensusBlock, err = denebBlockContents.ToGeneric()
if err == nil {
@@ -1444,7 +1442,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
return
}
var capellaBlock *structs.SignedBeaconBlockCapella
var capellaBlock *shared.SignedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
consensusBlock, err = capellaBlock.ToGeneric()
if err == nil {
@@ -1465,7 +1463,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
return
}
var bellatrixBlock *structs.SignedBeaconBlockBellatrix
var bellatrixBlock *shared.SignedBeaconBlockBellatrix
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
consensusBlock, err = bellatrixBlock.ToGeneric()
if err == nil {
@@ -1486,7 +1484,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
return
}
var altairBlock *structs.SignedBeaconBlockAltair
var altairBlock *shared.SignedBeaconBlockAltair
if err = unmarshalStrict(body, &altairBlock); err == nil {
consensusBlock, err = altairBlock.ToGeneric()
if err == nil {
@@ -1507,7 +1505,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
return
}
var phase0Block *structs.SignedBeaconBlock
var phase0Block *shared.SignedBeaconBlock
if err = unmarshalStrict(body, &phase0Block); err == nil {
consensusBlock, err = phase0Block.ToGeneric()
if err == nil {
@@ -1702,8 +1700,8 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
return
}
response := &structs.BlockRootResponse{
Data: &structs.BlockRoot{
response := &BlockRootResponse{
Data: &BlockRoot{
Root: hexutil.Encode(root),
},
ExecutionOptimistic: isOptimistic,
@@ -1738,8 +1736,8 @@ func (s *Server) GetStateFork(w http.ResponseWriter, r *http.Request) {
return
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
response := &structs.GetStateForkResponse{
Data: &structs.Fork{
response := &GetStateForkResponse{
Data: &shared.Fork{
PreviousVersion: hexutil.Encode(fork.PreviousVersion),
CurrentVersion: hexutil.Encode(fork.CurrentVersion),
Epoch: fmt.Sprintf("%d", fork.Epoch),
@@ -1802,7 +1800,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
return
}
committeesPerSlot := corehelpers.SlotCommitteeCount(activeCount)
committees := make([]*structs.Committee, 0)
committees := make([]*shared.Committee, 0)
for slot := startSlot; slot <= endSlot; slot++ {
if rawSlot != "" && slot != primitives.Slot(sl) {
continue
@@ -1820,7 +1818,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
for _, v := range committee {
validators = append(validators, strconv.FormatUint(uint64(v), 10))
}
committeeContainer := &structs.Committee{
committeeContainer := &shared.Committee{
Index: strconv.FormatUint(uint64(index), 10),
Slot: strconv.FormatUint(uint64(slot), 10),
Validators: validators,
@@ -1841,7 +1839,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
return
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
httputil.WriteJson(w, &structs.GetCommitteesResponse{Data: committees, ExecutionOptimistic: isOptimistic, Finalized: isFinalized})
httputil.WriteJson(w, &GetCommitteesResponse{Data: committees, ExecutionOptimistic: isOptimistic, Finalized: isFinalized})
}
// GetBlockHeaders retrieves block headers matching given query. By default it will fetch current head slot blocks.
@@ -1891,7 +1889,7 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
isOptimistic := false
isFinalized := true
blkHdrs := make([]*structs.SignedBeaconBlockHeaderContainer, len(blks))
blkHdrs := make([]*shared.SignedBeaconBlockHeaderContainer, len(blks))
for i, bl := range blks {
v1alpha1Header, err := bl.Header()
if err != nil {
@@ -1918,9 +1916,9 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
if isFinalized {
isFinalized = s.FinalizationFetcher.IsFinalized(ctx, blkRoots[i])
}
blkHdrs[i] = &structs.SignedBeaconBlockHeaderContainer{
Header: &structs.SignedBeaconBlockHeader{
Message: structs.BeaconBlockHeaderFromConsensus(v1alpha1Header.Header),
blkHdrs[i] = &shared.SignedBeaconBlockHeaderContainer{
Header: &shared.SignedBeaconBlockHeader{
Message: shared.BeaconBlockHeaderFromConsensus(v1alpha1Header.Header),
Signature: hexutil.Encode(v1alpha1Header.Signature),
},
Root: hexutil.Encode(headerRoot[:]),
@@ -1928,7 +1926,7 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
}
}
response := &structs.GetBlockHeadersResponse{
response := &GetBlockHeadersResponse{
Data: blkHdrs,
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
@@ -1978,12 +1976,12 @@ func (s *Server) GetBlockHeader(w http.ResponseWriter, r *http.Request) {
return
}
resp := &structs.GetBlockHeaderResponse{
Data: &structs.SignedBeaconBlockHeaderContainer{
resp := &GetBlockHeaderResponse{
Data: &shared.SignedBeaconBlockHeaderContainer{
Root: hexutil.Encode(headerRoot[:]),
Canonical: canonical,
Header: &structs.SignedBeaconBlockHeader{
Message: structs.BeaconBlockHeaderFromConsensus(blockHeader.Header),
Header: &shared.SignedBeaconBlockHeader{
Message: shared.BeaconBlockHeaderFromConsensus(blockHeader.Header),
Signature: hexutil.Encode(blockHeader.Signature),
},
},
@@ -2025,17 +2023,17 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
pj := st.PreviousJustifiedCheckpoint()
cj := st.CurrentJustifiedCheckpoint()
f := st.FinalizedCheckpoint()
resp := &structs.GetFinalityCheckpointsResponse{
Data: &structs.FinalityCheckpoints{
PreviousJustified: &structs.Checkpoint{
resp := &GetFinalityCheckpointsResponse{
Data: &FinalityCheckpoints{
PreviousJustified: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(pj.Epoch), 10),
Root: hexutil.Encode(pj.Root),
},
CurrentJustified: &structs.Checkpoint{
CurrentJustified: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(cj.Epoch), 10),
Root: hexutil.Encode(cj.Root),
},
Finalized: &structs.Checkpoint{
Finalized: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(f.Epoch), 10),
Root: hexutil.Encode(f.Root),
},
@@ -2063,8 +2061,8 @@ func (s *Server) GetGenesis(w http.ResponseWriter, r *http.Request) {
}
forkVersion := params.BeaconConfig().GenesisForkVersion
resp := &structs.GetGenesisResponse{
Data: &structs.Genesis{
resp := &GetGenesisResponse{
Data: &Genesis{
GenesisTime: strconv.FormatUint(uint64(genesisTime.Unix()), 10),
GenesisValidatorsRoot: hexutil.Encode(validatorsRoot[:]),
GenesisForkVersion: hexutil.Encode(forkVersion),
@@ -2072,48 +2070,3 @@ func (s *Server) GetGenesis(w http.ResponseWriter, r *http.Request) {
}
httputil.WriteJson(w, resp)
}
// GetDepositSnapshot retrieves the EIP-4881 Deposit Tree Snapshot. Either JSON or,
// if the appropriate Accept header is set, SSZ-serialized bytes will be returned.
func (s *Server) GetDepositSnapshot(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetDepositSnapshot")
defer span.End()
if s.BeaconDB == nil {
httputil.HandleError(w, "Could not retrieve beaconDB", http.StatusInternalServerError)
return
}
eth1data, err := s.BeaconDB.ExecutionChainData(ctx)
if err != nil {
httputil.HandleError(w, "Could not retrieve execution chain data: "+err.Error(), http.StatusInternalServerError)
return
}
if eth1data == nil {
httputil.HandleError(w, "Could not retrieve execution chain data: empty Eth1Data", http.StatusInternalServerError)
return
}
snapshot := eth1data.DepositSnapshot
if snapshot == nil || len(snapshot.Finalized) == 0 {
httputil.HandleError(w, "No Finalized Snapshot Available", http.StatusNotFound)
return
}
if len(snapshot.Finalized) > depositsnapshot.DepositContractDepth {
httputil.HandleError(w, "Retrieved invalid deposit snapshot", http.StatusInternalServerError)
return
}
if httputil.RespondWithSsz(r) {
sszData, err := snapshot.MarshalSSZ()
if err != nil {
httputil.HandleError(w, "Could not marshal deposit snapshot into SSZ: "+err.Error(), http.StatusInternalServerError)
return
}
httputil.WriteSsz(w, sszData, "deposit_snapshot.ssz")
return
}
httputil.WriteJson(
w,
&structs.GetDepositSnapshotResponse{
Data: structs.DepositSnapshotFromConsensus(snapshot),
},
)
}
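For illustration, here is a minimal client-side sketch of the content negotiation that GetDepositSnapshot implements above: JSON by default, SSZ bytes when an octet-stream Accept header is sent. The endpoint path, the Accept value, and the localhost:3500 port are assumptions following the usual beacon API conventions, not details taken from this diff.
package main
import (
    "fmt"
    "io"
    "net/http"
)
// fetchDepositSnapshot asks a beacon node for the EIP-4881 deposit tree snapshot.
// With wantSSZ set it sends Accept: application/octet-stream, which the handler
// above answers with SSZ bytes; otherwise the JSON envelope is returned.
func fetchDepositSnapshot(baseURL string, wantSSZ bool) ([]byte, error) {
    req, err := http.NewRequest(http.MethodGet, baseURL+"/eth/v1/beacon/deposit_snapshot", nil)
    if err != nil {
        return nil, err
    }
    if wantSSZ {
        req.Header.Set("Accept", "application/octet-stream")
    } else {
        req.Header.Set("Accept", "application/json")
    }
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
    }
    return io.ReadAll(resp.Body)
}
func main() {
    body, err := fetchDepositSnapshot("http://localhost:3500", false) // assumed local node address
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    fmt.Println("snapshot bytes:", len(body))
}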

View File

@@ -11,7 +11,6 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
@@ -56,25 +55,25 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
attestations = append(attestations, unaggAtts...)
isEmptyReq := rawSlot == "" && rawCommitteeIndex == ""
if isEmptyReq {
allAtts := make([]*structs.Attestation, len(attestations))
allAtts := make([]*shared.Attestation, len(attestations))
for i, att := range attestations {
allAtts[i] = structs.AttFromConsensus(att)
allAtts[i] = shared.AttFromConsensus(att)
}
httputil.WriteJson(w, &structs.ListAttestationsResponse{Data: allAtts})
httputil.WriteJson(w, &ListAttestationsResponse{Data: allAtts})
return
}
bothDefined := rawSlot != "" && rawCommitteeIndex != ""
filteredAtts := make([]*structs.Attestation, 0, len(attestations))
filteredAtts := make([]*shared.Attestation, 0, len(attestations))
for _, att := range attestations {
committeeIndexMatch := rawCommitteeIndex != "" && att.Data.CommitteeIndex == primitives.CommitteeIndex(committeeIndex)
slotMatch := rawSlot != "" && att.Data.Slot == primitives.Slot(slot)
shouldAppend := (bothDefined && committeeIndexMatch && slotMatch) || (!bothDefined && (committeeIndexMatch || slotMatch))
if shouldAppend {
filteredAtts = append(filteredAtts, structs.AttFromConsensus(att))
filteredAtts = append(filteredAtts, shared.AttFromConsensus(att))
}
}
httputil.WriteJson(w, &structs.ListAttestationsResponse{Data: filteredAtts})
httputil.WriteJson(w, &ListAttestationsResponse{Data: filteredAtts})
}
// SubmitAttestations submits an attestation object to the node. If the attestation passes all validation
@@ -83,7 +82,7 @@ func (s *Server) SubmitAttestations(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestations")
defer span.End()
var req structs.SubmitAttestationsRequest
var req SubmitAttestationsRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {
case err == io.EOF:
@@ -187,12 +186,12 @@ func (s *Server) ListVoluntaryExits(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, "Could not get exits from the pool: "+err.Error(), http.StatusInternalServerError)
return
}
exits := make([]*structs.SignedVoluntaryExit, len(sourceExits))
exits := make([]*shared.SignedVoluntaryExit, len(sourceExits))
for i, e := range sourceExits {
exits[i] = structs.SignedExitFromConsensus(e)
exits[i] = shared.SignedExitFromConsensus(e)
}
httputil.WriteJson(w, &structs.ListVoluntaryExitsResponse{Data: exits})
httputil.WriteJson(w, &ListVoluntaryExitsResponse{Data: exits})
}
// SubmitVoluntaryExit submits a SignedVoluntaryExit object to the node's pool
@@ -201,7 +200,7 @@ func (s *Server) SubmitVoluntaryExit(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitVoluntaryExit")
defer span.End()
var req structs.SignedVoluntaryExit
var req shared.SignedVoluntaryExit
err := json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
@@ -259,7 +258,7 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
defer span.End()
var req structs.SubmitSyncCommitteeSignaturesRequest
var req SubmitSyncCommitteeSignaturesRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {
case err == io.EOF:
@@ -318,7 +317,7 @@ func (s *Server) SubmitBLSToExecutionChanges(w http.ResponseWriter, r *http.Requ
var failures []*server.IndexedVerificationFailure
var toBroadcast []*eth.SignedBLSToExecutionChange
var req []*structs.SignedBLSToExecutionChange
var req []*shared.SignedBLSToExecutionChange
err = json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
@@ -438,8 +437,8 @@ func (s *Server) ListBLSToExecutionChanges(w http.ResponseWriter, r *http.Reques
return
}
httputil.WriteJson(w, &structs.BLSToExecutionChangesPoolResponse{
Data: structs.SignedBLSChangesFromConsensus(sourceChanges),
httputil.WriteJson(w, &BLSToExecutionChangesPoolResponse{
Data: shared.SignedBLSChangesFromConsensus(sourceChanges),
})
}
@@ -455,9 +454,9 @@ func (s *Server) GetAttesterSlashings(w http.ResponseWriter, r *http.Request) {
return
}
sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
slashings := structs.AttesterSlashingsFromConsensus(sourceSlashings)
slashings := shared.AttesterSlashingsFromConsensus(sourceSlashings)
httputil.WriteJson(w, &structs.GetAttesterSlashingsResponse{Data: slashings})
httputil.WriteJson(w, &GetAttesterSlashingsResponse{Data: slashings})
}
// SubmitAttesterSlashing submits an attester slashing object to the node's pool and
@@ -466,7 +465,7 @@ func (s *Server) SubmitAttesterSlashing(w http.ResponseWriter, r *http.Request)
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttesterSlashing")
defer span.End()
var req structs.AttesterSlashing
var req shared.AttesterSlashing
err := json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
@@ -529,9 +528,9 @@ func (s *Server) GetProposerSlashings(w http.ResponseWriter, r *http.Request) {
return
}
sourceSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, headState, true /* return unlimited slashings */)
slashings := structs.ProposerSlashingsFromConsensus(sourceSlashings)
slashings := shared.ProposerSlashingsFromConsensus(sourceSlashings)
httputil.WriteJson(w, &structs.GetProposerSlashingsResponse{Data: slashings})
httputil.WriteJson(w, &GetProposerSlashingsResponse{Data: slashings})
}
// SubmitProposerSlashing submits a proposer slashing object to the node's pool and if
@@ -540,7 +539,7 @@ func (s *Server) SubmitProposerSlashing(w http.ResponseWriter, r *http.Request)
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitProposerSlashing")
defer span.End()
var req structs.ProposerSlashing
var req shared.ProposerSlashing
err := json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
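The slot/committee_index filtering in ListAttestations above boils down to a small predicate: when both query parameters are supplied an attestation must match both, when only one is supplied matching that single field is enough, and an empty request returns everything. A standalone sketch of that rule, with simplified types rather than the handler's real signature:
package main
import "fmt"
// matches mirrors the shouldAppend logic in ListAttestations: with both filters
// set an attestation must match slot AND committee index; with one filter set,
// matching that single field is enough.
func matches(attSlot, attIdx uint64, rawSlot, rawIdx string, slot, idx uint64) bool {
    slotMatch := rawSlot != "" && attSlot == slot
    idxMatch := rawIdx != "" && attIdx == idx
    bothDefined := rawSlot != "" && rawIdx != ""
    return (bothDefined && slotMatch && idxMatch) || (!bothDefined && (slotMatch || idxMatch))
}
func main() {
    fmt.Println(matches(5, 2, "5", "2", 5, 2)) // true: both filters match
    fmt.Println(matches(5, 3, "5", "2", 5, 2)) // false: committee index differs
    fmt.Println(matches(5, 3, "5", "", 5, 0))  // true: only the slot filter was supplied
}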

View File

@@ -14,7 +14,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
prysmtime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
@@ -27,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -126,7 +126,7 @@ func TestListAttestations(t *testing.T) {
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -140,7 +140,7 @@ func TestListAttestations(t *testing.T) {
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -157,7 +157,7 @@ func TestListAttestations(t *testing.T) {
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -174,7 +174,7 @@ func TestListAttestations(t *testing.T) {
s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -340,7 +340,7 @@ func TestListVoluntaryExits(t *testing.T) {
s.ListVoluntaryExits(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListVoluntaryExitsResponse{}
resp := &ListVoluntaryExitsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -660,11 +660,11 @@ func TestListBLSToExecutionChanges(t *testing.T) {
s.ListBLSToExecutionChanges(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.BLSToExecutionChangesPoolResponse{}
resp := &BLSToExecutionChangesPoolResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.DeepEqual(t, structs.SignedBLSChangeFromConsensus(change1), resp.Data[0])
assert.DeepEqual(t, structs.SignedBLSChangeFromConsensus(change2), resp.Data[1])
assert.DeepEqual(t, shared.SignedBLSChangeFromConsensus(change1), resp.Data[0])
assert.DeepEqual(t, shared.SignedBLSChangeFromConsensus(change2), resp.Data[1])
}
func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
@@ -722,12 +722,12 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*structs.SignedBLSToExecutionChange, numValidators)
signedChanges := make([]*shared.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, prysmtime.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signed := &structs.SignedBLSToExecutionChange{
Message: structs.BLSChangeFromConsensus(message),
signed := &shared.SignedBLSToExecutionChange{
Message: shared.BLSChangeFromConsensus(message),
Signature: hexutil.Encode(signature),
}
signedChanges[i] = signed
@@ -834,13 +834,13 @@ func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
stc, err := state_native.InitializeFromProtoCapella(spc)
require.NoError(t, err)
signedChanges := make([]*structs.SignedBLSToExecutionChange, numValidators)
signedChanges := make([]*shared.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(stc, prysmtime.CurrentEpoch(stc), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signedChanges[i] = &structs.SignedBLSToExecutionChange{
Message: structs.BLSChangeFromConsensus(message),
signedChanges[i] = &shared.SignedBLSToExecutionChange{
Message: shared.BLSChangeFromConsensus(message),
Signature: hexutil.Encode(signature),
}
}
@@ -934,15 +934,15 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*structs.SignedBLSToExecutionChange, numValidators)
signedChanges := make([]*shared.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, prysmtime.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
if i == 1 {
signature[0] = 0x00
}
signedChanges[i] = &structs.SignedBLSToExecutionChange{
Message: structs.BLSChangeFromConsensus(message),
signedChanges[i] = &shared.SignedBLSToExecutionChange{
Message: shared.BLSChangeFromConsensus(message),
Signature: hexutil.Encode(signature),
}
}
@@ -975,10 +975,10 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
require.Equal(t, len(poolChanges)+1, len(signedChanges))
require.NoError(t, err)
require.DeepEqual(t, structs.SignedBLSChangeFromConsensus(poolChanges[0]), signedChanges[0])
require.DeepEqual(t, shared.SignedBLSChangeFromConsensus(poolChanges[0]), signedChanges[0])
for i := 2; i < numValidators; i++ {
require.DeepEqual(t, structs.SignedBLSChangeFromConsensus(poolChanges[i-1]), signedChanges[i])
require.DeepEqual(t, shared.SignedBLSChangeFromConsensus(poolChanges[i-1]), signedChanges[i])
}
}
@@ -1069,7 +1069,7 @@ func TestGetAttesterSlashings(t *testing.T) {
s.GetAttesterSlashings(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetAttesterSlashingsResponse{}
resp := &GetAttesterSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -1135,7 +1135,7 @@ func TestGetProposerSlashings(t *testing.T) {
s.GetProposerSlashings(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetProposerSlashingsResponse{}
resp := &GetProposerSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
@@ -1213,7 +1213,7 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := structs.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
@@ -1305,7 +1305,7 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := structs.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
@@ -1416,7 +1416,7 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := structs.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
@@ -1500,7 +1500,7 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := structs.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer

View File

@@ -8,7 +8,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
@@ -68,8 +67,8 @@ func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
resp := &structs.GetStateRootResponse{
Data: &structs.StateRoot{
resp := &GetStateRootResponse{
Data: &StateRoot{
Root: hexutil.Encode(stateRoot),
},
ExecutionOptimistic: isOptimistic,
@@ -138,8 +137,8 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
resp := &structs.GetRandaoResponse{
Data: &structs.Randao{Randao: hexutil.Encode(randao)},
resp := &GetRandaoResponse{
Data: &Randao{Randao: hexutil.Encode(randao)},
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
}
@@ -240,8 +239,8 @@ func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
resp := structs.GetSyncCommitteeResponse{
Data: &structs.SyncCommitteeValidators{
resp := GetSyncCommitteeResponse{
Data: &SyncCommitteeValidators{
Validators: committeeIndices,
ValidatorAggregates: subcommittees,
},
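For context on the SyncCommitteeValidators shape used above: Validators is the flat list of committee member indices and ValidatorAggregates is the same list split into subcommittee-sized chunks. The partitioning itself is not shown in this hunk, so the following is only a rough sketch assuming the usual mainnet sizes (512 members, 4 subnets) instead of values read from params:
package main
import "fmt"
// splitIntoAggregates chunks a flat sync committee index list into subcommittees;
// this is how ValidatorAggregates relates to Validators in the response above.
func splitIntoAggregates(indices []string, subnetCount int) [][]string {
    size := len(indices) / subnetCount
    if size == 0 {
        return [][]string{indices}
    }
    aggregates := make([][]string, 0, subnetCount)
    for i := 0; i < len(indices); i += size {
        end := i + size
        if end > len(indices) {
            end = len(indices)
        }
        aggregates = append(aggregates, indices[i:end])
    }
    return aggregates
}
func main() {
    committee := make([]string, 512) // assumed mainnet sync committee size
    for i := range committee {
        committee[i] = fmt.Sprint(i)
    }
    aggregates := splitIntoAggregates(committee, 4) // assumed subnet count
    fmt.Println(len(aggregates), len(aggregates[0])) // 4 128
}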

View File

@@ -13,7 +13,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
chainMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
@@ -62,7 +61,7 @@ func TestGetStateRoot(t *testing.T) {
s.GetStateRoot(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetStateRootResponse{}
resp := &GetStateRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(stateRoot[:]), resp.Data.Root)
@@ -86,7 +85,7 @@ func TestGetStateRoot(t *testing.T) {
s.GetStateRoot(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetStateRootResponse{}
resp := &GetStateRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.DeepEqual(t, true, resp.ExecutionOptimistic)
})
@@ -117,7 +116,7 @@ func TestGetStateRoot(t *testing.T) {
s.GetStateRoot(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetStateRootResponse{}
resp := &GetStateRootResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.DeepEqual(t, true, resp.Finalized)
})
@@ -164,7 +163,7 @@ func TestGetRandao(t *testing.T) {
s.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetRandaoResponse{}
resp := &GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(mixCurrent[:]), resp.Data.Randao)
})
@@ -176,7 +175,7 @@ func TestGetRandao(t *testing.T) {
s.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetRandaoResponse{}
resp := &GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(mixCurrent[:]), resp.Data.Randao)
})
@@ -188,7 +187,7 @@ func TestGetRandao(t *testing.T) {
s.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetRandaoResponse{}
resp := &GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(mixOld[:]), resp.Data.Randao)
})
@@ -204,7 +203,7 @@ func TestGetRandao(t *testing.T) {
s.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetRandaoResponse{}
resp := &GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(headRandao[:]), resp.Data.Randao)
})
@@ -263,7 +262,7 @@ func TestGetRandao(t *testing.T) {
s.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetRandaoResponse{}
resp := &GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.DeepEqual(t, true, resp.ExecutionOptimistic)
})
@@ -300,7 +299,7 @@ func TestGetRandao(t *testing.T) {
s.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetRandaoResponse{}
resp := &GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.DeepEqual(t, true, resp.Finalized)
})
@@ -459,7 +458,7 @@ func TestGetSyncCommittees(t *testing.T) {
s.GetSyncCommittees(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetSyncCommitteeResponse{}
resp := &GetSyncCommitteeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
committeeVals := resp.Data.Validators
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)))
@@ -509,7 +508,7 @@ func TestGetSyncCommittees(t *testing.T) {
s.GetSyncCommittees(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetSyncCommitteeResponse{}
resp := &GetSyncCommitteeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
})
@@ -553,7 +552,7 @@ func TestGetSyncCommittees(t *testing.T) {
s.GetSyncCommittees(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetSyncCommitteeResponse{}
resp := &GetSyncCommitteeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
})
@@ -630,7 +629,7 @@ func TestGetSyncCommittees_Future(t *testing.T) {
writer.Body = &bytes.Buffer{}
s.GetSyncCommittees(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetSyncCommitteeResponse{}
resp := &GetSyncCommitteeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
committeeVals := resp.Data.Validators
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)))

File diff suppressed because it is too large

View File

@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -52,7 +51,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
var req structs.GetValidatorsRequest
var req GetValidatorsRequest
if r.Method == http.MethodPost {
err = json.NewDecoder(r.Body).Decode(&req)
switch {
@@ -84,8 +83,8 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
// return no data if all IDs are ignored
if len(rawIds) > 0 && len(ids) == 0 {
resp := &structs.GetValidatorsResponse{
Data: []*structs.ValidatorContainer{},
resp := &GetValidatorsResponse{
Data: []*ValidatorContainer{},
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
}
@@ -101,7 +100,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
// Exit early if no matching validators were found or we don't want to further filter validators by status.
if len(readOnlyVals) == 0 || len(statuses) == 0 {
containers := make([]*structs.ValidatorContainer, len(readOnlyVals))
containers := make([]*ValidatorContainer, len(readOnlyVals))
for i, val := range readOnlyVals {
valStatus, err := helpers.ValidatorSubStatus(val, epoch)
if err != nil {
@@ -119,7 +118,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
containers[i] = valContainerFromReadOnlyVal(val, id, balance, valStatus)
}
resp := &structs.GetValidatorsResponse{
resp := &GetValidatorsResponse{
Data: containers,
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
@@ -137,7 +136,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
filteredStatuses[vs] = true
}
valContainers := make([]*structs.ValidatorContainer, 0, len(readOnlyVals))
valContainers := make([]*ValidatorContainer, 0, len(readOnlyVals))
for i, val := range readOnlyVals {
valStatus, err := helpers.ValidatorStatus(val, epoch)
if err != nil {
@@ -150,7 +149,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
return
}
if filteredStatuses[valStatus] || filteredStatuses[valSubStatus] {
var container *structs.ValidatorContainer
var container *ValidatorContainer
id := primitives.ValidatorIndex(i)
if len(ids) > 0 {
id = ids[i]
@@ -165,7 +164,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
}
resp := &structs.GetValidatorsResponse{
resp := &GetValidatorsResponse{
Data: valContainers,
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
@@ -230,7 +229,7 @@ func (s *Server) GetValidator(w http.ResponseWriter, r *http.Request) {
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
resp := &structs.GetValidatorResponse{
resp := &GetValidatorResponse{
Data: container,
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
@@ -287,8 +286,8 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
}
// return no data if all IDs are ignored
if len(rawIds) > 0 && len(ids) == 0 {
resp := &structs.GetValidatorBalancesResponse{
Data: []*structs.ValidatorBalance{},
resp := &GetValidatorBalancesResponse{
Data: []*ValidatorBalance{},
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
}
@@ -297,26 +296,26 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
}
bals := st.Balances()
var valBalances []*structs.ValidatorBalance
var valBalances []*ValidatorBalance
if len(ids) == 0 {
valBalances = make([]*structs.ValidatorBalance, len(bals))
valBalances = make([]*ValidatorBalance, len(bals))
for i, b := range bals {
valBalances[i] = &structs.ValidatorBalance{
valBalances[i] = &ValidatorBalance{
Index: strconv.FormatUint(uint64(i), 10),
Balance: strconv.FormatUint(b, 10),
}
}
} else {
valBalances = make([]*structs.ValidatorBalance, len(ids))
valBalances = make([]*ValidatorBalance, len(ids))
for i, id := range ids {
valBalances[i] = &structs.ValidatorBalance{
valBalances[i] = &ValidatorBalance{
Index: strconv.FormatUint(uint64(id), 10),
Balance: strconv.FormatUint(bals[id], 10),
}
}
}
resp := &structs.GetValidatorBalancesResponse{
resp := &GetValidatorBalancesResponse{
Data: valBalances,
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
@@ -405,13 +404,13 @@ func valContainerFromReadOnlyVal(
id primitives.ValidatorIndex,
bal uint64,
valStatus validator.Status,
) *structs.ValidatorContainer {
) *ValidatorContainer {
pubkey := val.PublicKey()
return &structs.ValidatorContainer{
return &ValidatorContainer{
Index: strconv.FormatUint(uint64(id), 10),
Balance: strconv.FormatUint(bal, 10),
Status: valStatus.String(),
Validator: &structs.Validator{
Validator: &Validator{
Pubkey: hexutil.Encode(pubkey[:]),
WithdrawalCredentials: hexutil.Encode(val.WithdrawalCredentials()),
EffectiveBalance: strconv.FormatUint(val.EffectiveBalance(), 10),
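The status filtering in GetValidators above is a set-membership check: the requested statuses go into a lookup map and a validator is kept when either its coarse status or its more specific sub-status is present. A toy version of that check, using plain strings in place of the validator.Status type:
package main
import "fmt"
// keepValidator mirrors the filteredStatuses check in GetValidators: a validator
// passes when either its status or its sub-status was requested.
func keepValidator(requested []string, status, subStatus string) bool {
    filtered := make(map[string]bool, len(requested))
    for _, s := range requested {
        filtered[s] = true
    }
    return filtered[status] || filtered[subStatus]
}
func main() {
    fmt.Println(keepValidator([]string{"active"}, "active", "active_ongoing"))         // true: coarse status requested
    fmt.Println(keepValidator([]string{"exited_slashed"}, "exited", "exited_slashed")) // true: sub-status requested
    fmt.Println(keepValidator([]string{"pending"}, "active", "active_ongoing"))        // false: neither requested
}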

View File

@@ -12,7 +12,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
chainMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
@@ -53,7 +52,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
val := resp.Data[0]
@@ -92,7 +91,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
@@ -124,7 +123,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
@@ -154,7 +153,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
@@ -203,7 +202,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
assert.Equal(t, "1", resp.Data[0].Index)
@@ -226,7 +225,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
assert.Equal(t, "1", resp.Data[0].Index)
@@ -249,7 +248,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
})
@@ -277,7 +276,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
})
@@ -293,7 +292,7 @@ func TestGetValidators(t *testing.T) {
}
var body bytes.Buffer
req := &structs.GetValidatorsRequest{
req := &GetValidatorsRequest{
Ids: []string{"0", strconv.Itoa(exitedValIndex)},
Statuses: []string{"exited"},
}
@@ -312,7 +311,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
assert.Equal(t, "3", resp.Data[0].Index)
@@ -329,7 +328,7 @@ func TestGetValidators(t *testing.T) {
}
var body bytes.Buffer
req := &structs.GetValidatorsRequest{
req := &GetValidatorsRequest{
Ids: nil,
Statuses: nil,
}
@@ -348,7 +347,7 @@ func TestGetValidators(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
@@ -498,7 +497,7 @@ func TestGetValidators_FilterByStatus(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 3, len(resp.Data))
for _, vc := range resp.Data {
@@ -529,7 +528,7 @@ func TestGetValidators_FilterByStatus(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 2, len(resp.Data))
for _, vc := range resp.Data {
@@ -559,7 +558,7 @@ func TestGetValidators_FilterByStatus(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 4, len(resp.Data))
for _, vc := range resp.Data {
@@ -592,7 +591,7 @@ func TestGetValidators_FilterByStatus(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 4, len(resp.Data))
for _, vc := range resp.Data {
@@ -625,7 +624,7 @@ func TestGetValidators_FilterByStatus(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 2, len(resp.Data))
for _, vc := range resp.Data {
@@ -660,7 +659,7 @@ func TestGetValidator(t *testing.T) {
s.GetValidator(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorResponse{}
resp := &GetValidatorResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "0", resp.Data.Index)
assert.Equal(t, "32000000000", resp.Data.Balance)
@@ -695,7 +694,7 @@ func TestGetValidator(t *testing.T) {
s.GetValidator(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorResponse{}
resp := &GetValidatorResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "0", resp.Data.Index)
})
@@ -797,7 +796,7 @@ func TestGetValidator(t *testing.T) {
s.GetValidator(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorResponse{}
resp := &GetValidatorResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
})
@@ -825,7 +824,7 @@ func TestGetValidator(t *testing.T) {
s.GetValidator(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorResponse{}
resp := &GetValidatorResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
})
@@ -859,7 +858,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
val := resp.Data[3]
@@ -888,7 +887,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
@@ -920,7 +919,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
@@ -950,7 +949,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorsResponse{}
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
@@ -980,7 +979,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
assert.Equal(t, "1", resp.Data[0].Index)
@@ -1003,7 +1002,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
assert.Equal(t, "1", resp.Data[0].Index)
@@ -1049,7 +1048,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
})
@@ -1081,7 +1080,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
})
@@ -1114,7 +1113,7 @@ func TestGetValidatorBalances(t *testing.T) {
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetValidatorBalancesResponse{}
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)

View File

@@ -1,7 +1,9 @@
package structs
package beacon
import (
"encoding/json"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
)
type BlockRootResponse struct {
@@ -15,31 +17,31 @@ type BlockRoot struct {
}
type GetCommitteesResponse struct {
Data []*Committee `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*shared.Committee `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type ListAttestationsResponse struct {
Data []*Attestation `json:"data"`
Data []*shared.Attestation `json:"data"`
}
type SubmitAttestationsRequest struct {
Data []*Attestation `json:"data"`
Data []*shared.Attestation `json:"data"`
}
type ListVoluntaryExitsResponse struct {
Data []*SignedVoluntaryExit `json:"data"`
Data []*shared.SignedVoluntaryExit `json:"data"`
}
type SubmitSyncCommitteeSignaturesRequest struct {
Data []*SyncCommitteeMessage `json:"data"`
Data []*shared.SyncCommitteeMessage `json:"data"`
}
type GetStateForkResponse struct {
Data *Fork `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data *shared.Fork `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type GetFinalityCheckpointsResponse struct {
@@ -49,9 +51,9 @@ type GetFinalityCheckpointsResponse struct {
}
type FinalityCheckpoints struct {
PreviousJustified *Checkpoint `json:"previous_justified"`
CurrentJustified *Checkpoint `json:"current_justified"`
Finalized *Checkpoint `json:"finalized"`
PreviousJustified *shared.Checkpoint `json:"previous_justified"`
CurrentJustified *shared.Checkpoint `json:"current_justified"`
Finalized *shared.Checkpoint `json:"finalized"`
}
type GetGenesisResponse struct {
@@ -65,15 +67,15 @@ type Genesis struct {
}
type GetBlockHeadersResponse struct {
Data []*SignedBeaconBlockHeaderContainer `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*shared.SignedBeaconBlockHeaderContainer `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}
type GetBlockHeaderResponse struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data *SignedBeaconBlockHeaderContainer `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data *shared.SignedBeaconBlockHeaderContainer `json:"data"`
}
type GetValidatorsRequest struct {
@@ -106,6 +108,17 @@ type ValidatorContainer struct {
Validator *Validator `json:"validator"`
}
type Validator struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
EffectiveBalance string `json:"effective_balance"`
Slashed bool `json:"slashed"`
ActivationEligibilityEpoch string `json:"activation_eligibility_epoch"`
ActivationEpoch string `json:"activation_epoch"`
ExitEpoch string `json:"exit_epoch"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type ValidatorBalance struct {
Index string `json:"index"`
Balance string `json:"balance"`
@@ -128,9 +141,9 @@ type SignedBlock struct {
}
type GetBlockAttestationsResponse struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*Attestation `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*shared.Attestation `json:"data"`
}
type GetStateRootResponse struct {
@@ -165,34 +178,13 @@ type SyncCommitteeValidators struct {
}
type BLSToExecutionChangesPoolResponse struct {
Data []*SignedBLSToExecutionChange `json:"data"`
Data []*shared.SignedBLSToExecutionChange `json:"data"`
}
type GetAttesterSlashingsResponse struct {
Data []*AttesterSlashing `json:"data"`
Data []*shared.AttesterSlashing `json:"data"`
}
type GetProposerSlashingsResponse struct {
Data []*ProposerSlashing `json:"data"`
}
type GetWeakSubjectivityResponse struct {
Data *WeakSubjectivityData `json:"data"`
}
type WeakSubjectivityData struct {
WsCheckpoint *Checkpoint `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}
type GetDepositSnapshotResponse struct {
Data *DepositSnapshot `json:"data"`
}
type DepositSnapshot struct {
Finalized []string `json:"finalized"`
DepositRoot string `json:"deposit_root"`
DepositCount string `json:"deposit_count"`
ExecutionBlockHash string `json:"execution_block_hash"`
ExecutionBlockHeight string `json:"execution_block_height"`
Data []*shared.ProposerSlashing `json:"data"`
}
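All numeric fields in these response structs are decimal strings, following the beacon API convention, so a decoded checkpoint carries its epoch as a string rather than an integer. A small sketch of decoding the finality-checkpoints envelope, using local stand-in types whose field names and JSON tags follow the definitions in this file (the real types live here and in shared):
package main
import (
    "encoding/json"
    "fmt"
)
// Local stand-ins for the response types above.
type checkpoint struct {
    Epoch string `json:"epoch"`
    Root  string `json:"root"`
}
type finalityCheckpoints struct {
    PreviousJustified *checkpoint `json:"previous_justified"`
    CurrentJustified  *checkpoint `json:"current_justified"`
    Finalized         *checkpoint `json:"finalized"`
}
type getFinalityCheckpointsResponse struct {
    Data *finalityCheckpoints `json:"data"`
}
func main() {
    body := []byte(`{"data":{"previous_justified":{"epoch":"8","root":"0x01"},
        "current_justified":{"epoch":"9","root":"0x02"},
        "finalized":{"epoch":"8","root":"0x01"}}}`)
    var resp getFinalityCheckpointsResponse
    if err := json.Unmarshal(body, &resp); err != nil {
        panic(err)
    }
    fmt.Println(resp.Data.Finalized.Epoch) // "8" — epochs arrive as strings
}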

View File

@@ -5,12 +5,13 @@ go_library(
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/blob",
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/fieldparams:go_default_library",
"//network/httputil:go_default_library",
@@ -25,12 +26,10 @@ go_test(
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/params:go_default_library",
"//network/httputil:go_default_library",

View File

@@ -7,8 +7,8 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/network/httputil"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -85,18 +85,18 @@ loop:
return indices
}
func buildSidecarsResponse(sidecars []*eth.BlobSidecar) *structs.SidecarsResponse {
resp := &structs.SidecarsResponse{Data: make([]*structs.Sidecar, len(sidecars))}
func buildSidecarsResponse(sidecars []*eth.BlobSidecar) *SidecarsResponse {
resp := &SidecarsResponse{Data: make([]*Sidecar, len(sidecars))}
for i, sc := range sidecars {
proofs := make([]string, len(sc.CommitmentInclusionProof))
for j := range sc.CommitmentInclusionProof {
proofs[j] = hexutil.Encode(sc.CommitmentInclusionProof[j])
}
resp.Data[i] = &structs.Sidecar{
resp.Data[i] = &Sidecar{
Index: strconv.FormatUint(sc.Index, 10),
Blob: hexutil.Encode(sc.Blob),
KzgCommitment: hexutil.Encode(sc.KzgCommitment),
SignedBeaconBlockHeader: structs.SignedBeaconBlockHeaderFromConsensus(sc.SignedBlockHeader),
SignedBeaconBlockHeader: shared.SignedBeaconBlockHeaderFromConsensus(sc.SignedBlockHeader),
KzgProof: hexutil.Encode(sc.KzgProof),
CommitmentInclusionProof: proofs,
}
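buildSidecarsResponse above is a straight field-by-field conversion: the numeric index becomes a decimal string, byte slices become 0x-prefixed hex, and each inclusion-proof branch is encoded individually. A cut-down sketch of the same conversion on plain byte slices, without the protobuf sidecar type:
package main
import (
    "encoding/hex"
    "fmt"
    "strconv"
)
// encodeHex mimics hexutil.Encode: a 0x prefix plus lowercase hex.
func encodeHex(b []byte) string {
    return "0x" + hex.EncodeToString(b)
}
// sidecarStrings shows the shape of the conversion done in buildSidecarsResponse:
// index as a decimal string, blob/commitment as hex, proof branches one by one.
func sidecarStrings(index uint64, blob, commitment []byte, proof [][]byte) (string, string, string, []string) {
    branches := make([]string, len(proof))
    for i, p := range proof {
        branches[i] = encodeHex(p)
    }
    return strconv.FormatUint(index, 10), encodeHex(blob), encodeHex(commitment), branches
}
func main() {
    idx, blob, commitment, branches := sidecarStrings(2, []byte{0xaa}, []byte{0xbb}, [][]byte{{0x01}, {0x02}})
    fmt.Println(idx, blob, commitment, branches) // 2 0xaa 0xbb [0x01 0x02]
}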

View File

@@ -9,15 +9,12 @@ import (
"net/url"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/network/httputil"
@@ -73,11 +70,8 @@ func TestBlobs(t *testing.T) {
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:]},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
@@ -86,7 +80,7 @@ func TestBlobs(t *testing.T) {
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
sidecar := resp.Data[0]
@@ -121,11 +115,8 @@ func TestBlobs(t *testing.T) {
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
@@ -134,7 +125,7 @@ func TestBlobs(t *testing.T) {
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
@@ -145,9 +136,26 @@ func TestBlobs(t *testing.T) {
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
}
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
t.Run("root", func(t *testing.T) {
u := "http://foo.example/" + hexutil.Encode(blockRoot[:])
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
BeaconDB: db,
BlobStorage: bs,
}
@@ -158,30 +166,7 @@ func TestBlobs(t *testing.T) {
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
t.Run("root", func(t *testing.T) {
u := "http://foo.example/" + hexutil.Encode(blockRoot[:])
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
BeaconDB: db,
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
}
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
@@ -191,10 +176,7 @@ func TestBlobs(t *testing.T) {
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
BeaconDB: db,
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
@@ -204,7 +186,7 @@ func TestBlobs(t *testing.T) {
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
@@ -215,11 +197,8 @@ func TestBlobs(t *testing.T) {
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
@@ -228,7 +207,7 @@ func TestBlobs(t *testing.T) {
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
sidecar := resp.Data[0]
@@ -245,11 +224,8 @@ func TestBlobs(t *testing.T) {
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: filesystem.NewEphemeralBlobStorage(t), // new ephemeral storage
BeaconDB: db,
BlobStorage: filesystem.NewEphemeralBlobStorage(t), // new ephemeral storage
}
s := &Server{
Blocker: blocker,
@@ -257,63 +233,10 @@ func TestBlobs(t *testing.T) {
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
resp := &SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, len(resp.Data), 0)
})
t.Run("outside retention period returns 200 w/ empty list ", func(t *testing.T) {
u := "http://foo.example/123"
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
moc := &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: moc,
GenesisTimeFetcher: moc, // genesis time is set to 0 here, so it results in current epoch being extremely large
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
}
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 0, len(resp.Data))
})
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(commitments), 0)
require.NoError(t, db.SaveBlock(context.Background(), denebBlock))
u := "http://foo.example/333"
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,
}
s.Blobs(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 0, len(resp.Data))
})
t.Run("slot before Deneb fork", func(t *testing.T) {
u := "http://foo.example/31"
request := httptest.NewRequest("GET", u, nil)
@@ -358,11 +281,8 @@ func TestBlobs(t *testing.T) {
writer.Body = &bytes.Buffer{}
blocker := &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &eth.Checkpoint{Root: blockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BeaconDB: db,
BlobStorage: bs,
}
s := &Server{
Blocker: blocker,

View File

@@ -0,0 +1,16 @@
package blob
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
type SidecarsResponse struct {
Data []*Sidecar `json:"data"`
}
type Sidecar struct {
Index string `json:"index"`
Blob string `json:"blob"`
SignedBeaconBlockHeader *shared.SignedBeaconBlockHeader `json:"signed_block_header"`
KzgCommitment string `json:"kzg_commitment"`
KzgProof string `json:"kzg_proof"`
CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
}

View File

@@ -5,11 +5,11 @@ go_library(
srcs = [
"handlers.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder",
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -30,7 +30,6 @@ go_test(
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -104,17 +103,17 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
})
return
}
httputil.WriteJson(w, &structs.ExpectedWithdrawalsResponse{
httputil.WriteJson(w, &ExpectedWithdrawalsResponse{
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
Data: buildExpectedWithdrawalsData(withdrawals),
})
}
func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*structs.ExpectedWithdrawal {
data := make([]*structs.ExpectedWithdrawal, len(withdrawals))
func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*ExpectedWithdrawal {
data := make([]*ExpectedWithdrawal, len(withdrawals))
for i, withdrawal := range withdrawals {
data[i] = &structs.ExpectedWithdrawal{
data[i] = &ExpectedWithdrawal{
Address: hexutil.Encode(withdrawal.Address),
Amount: strconv.FormatUint(withdrawal.Amount, 10),
Index: strconv.FormatUint(withdrawal.Index, 10),
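buildExpectedWithdrawalsData converts each engine-API withdrawal into its string form: hex-encoded address, decimal amount (in Gwei), decimal indices. A minimal sketch of one such conversion, with a locally defined withdrawal type standing in for enginev1.Withdrawal:
package main
import (
    "encoding/hex"
    "fmt"
    "strconv"
)
// withdrawal is a local stand-in for enginev1.Withdrawal.
type withdrawal struct {
    Index          uint64
    ValidatorIndex uint64
    Address        []byte
    Amount         uint64 // Gwei
}
// expectedWithdrawal mirrors the string-typed response element built above.
type expectedWithdrawal struct {
    Address        string
    Amount         string
    Index          string
    ValidatorIndex string
}
func toExpected(w withdrawal) expectedWithdrawal {
    return expectedWithdrawal{
        Address:        "0x" + hex.EncodeToString(w.Address),
        Amount:         strconv.FormatUint(w.Amount, 10),
        Index:          strconv.FormatUint(w.Index, 10),
        ValidatorIndex: strconv.FormatUint(w.ValidatorIndex, 10),
    }
}
func main() {
    fmt.Printf("%+v\n", toExpected(withdrawal{Index: 1, ValidatorIndex: 14, Address: make([]byte, 20), Amount: 32998257885}))
}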

View File

@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -178,26 +177,26 @@ func TestExpectedWithdrawals(t *testing.T) {
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ExpectedWithdrawalsResponse{}
resp := &ExpectedWithdrawalsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
assert.Equal(t, 3, len(resp.Data))
expectedWithdrawal1 := &structs.ExpectedWithdrawal{
expectedWithdrawal1 := &ExpectedWithdrawal{
Index: strconv.FormatUint(0, 10),
ValidatorIndex: strconv.FormatUint(5, 10),
Address: hexutil.Encode(validators[5].WithdrawalCredentials[12:]),
// Decreased due to epoch processing when state advanced forward
Amount: strconv.FormatUint(31998257885, 10),
}
expectedWithdrawal2 := &structs.ExpectedWithdrawal{
expectedWithdrawal2 := &ExpectedWithdrawal{
Index: strconv.FormatUint(1, 10),
ValidatorIndex: strconv.FormatUint(14, 10),
Address: hexutil.Encode(validators[14].WithdrawalCredentials[12:]),
// MaxEffectiveBalance + MinDepositAmount + decrease after epoch processing
Amount: strconv.FormatUint(32998257885, 10),
}
expectedWithdrawal3 := &structs.ExpectedWithdrawal{
expectedWithdrawal3 := &ExpectedWithdrawal{
Index: strconv.FormatUint(2, 10),
ValidatorIndex: strconv.FormatUint(15, 10),
Address: hexutil.Encode(validators[15].WithdrawalCredentials[12:]),

View File

@@ -1,4 +1,4 @@
package structs
package builder
type ExpectedWithdrawalsResponse struct {
Data []*ExpectedWithdrawal `json:"data"`

View File

@@ -2,11 +2,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["handlers.go"],
srcs = [
"handlers.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config",
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
@@ -20,7 +23,6 @@ go_test(
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -8,7 +8,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/network/forks"
"github.com/prysmaticlabs/prysm/v4/network/httputil"
@@ -20,8 +20,8 @@ func GetDepositContract(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetDepositContract")
defer span.End()
httputil.WriteJson(w, &structs.GetDepositContractResponse{
Data: &structs.DepositContractData{
httputil.WriteJson(w, &GetDepositContractResponse{
Data: &DepositContractData{
ChainId: strconv.FormatUint(params.BeaconConfig().DepositChainID, 10),
Address: params.BeaconConfig().DepositContractAddress,
},
@@ -35,14 +35,14 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
schedule := params.BeaconConfig().ForkVersionSchedule
if len(schedule) == 0 {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: make([]*structs.Fork, 0),
httputil.WriteJson(w, &GetForkScheduleResponse{
Data: make([]*shared.Fork, 0),
})
return
}
versions := forks.SortedForkVersions(schedule)
chainForks := make([]*structs.Fork, len(schedule))
chainForks := make([]*shared.Fork, len(schedule))
var previous, current []byte
for i, v := range versions {
if i == 0 {
@@ -52,14 +52,14 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
}
copyV := v
current = copyV[:]
chainForks[i] = &structs.Fork{
chainForks[i] = &shared.Fork{
PreviousVersion: hexutil.Encode(previous),
CurrentVersion: hexutil.Encode(current),
Epoch: fmt.Sprintf("%d", schedule[v]),
}
}
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
httputil.WriteJson(w, &GetForkScheduleResponse{
Data: chainForks,
})
}
@@ -77,7 +77,7 @@ func GetSpec(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, "Could not prepare config spec: "+err.Error(), http.StatusInternalServerError)
return
}
httputil.WriteJson(w, &structs.GetSpecResponse{Data: data})
httputil.WriteJson(w, &GetSpecResponse{Data: data})
}
func prepareConfigSpec() (map[string]string, error) {

View File

@@ -11,7 +11,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/api/server/structs"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -34,7 +33,7 @@ func TestGetDepositContract(t *testing.T) {
GetDepositContract(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
response := structs.GetDepositContractResponse{}
response := GetDepositContractResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &response))
assert.Equal(t, "10", response.Data.ChainId)
assert.Equal(t, "0x4242424242424242424242424242424242424242", response.Data.Address)
@@ -168,7 +167,7 @@ func TestGetSpec(t *testing.T) {
GetSpec(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := structs.GetSpecResponse{}
resp := GetSpecResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
@@ -487,7 +486,7 @@ func TestForkSchedule_Ok(t *testing.T) {
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
resp := &GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 3, len(resp.Data))
fork := resp.Data[0]
@@ -510,7 +509,7 @@ func TestForkSchedule_Ok(t *testing.T) {
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
resp := &GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))

View File

@@ -1,4 +1,6 @@
package structs
package config

import "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"

type GetDepositContractResponse struct {
Data *DepositContractData `json:"data"`
@@ -10,7 +12,7 @@ type DepositContractData struct {
}

type GetForkScheduleResponse struct {
Data []*Fork `json:"data"`
Data []*shared.Fork `json:"data"`
}

type GetSpecResponse struct {

Some files were not shown because too many files have changed in this diff.