Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: v4.0.7...prestonvan (26 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 12d703d7ac | |
| | ff3d2bc69f | |
| | dd403f830c | |
| | e9c8e84618 | |
| | 9c250dd4c2 | |
| | f97db3b738 | |
| | 43378ae8d5 | |
| | 2217b45e16 | |
| | 405cd6ed86 | |
| | ba9bbdd6b9 | |
| | 945c76132c | |
| | 056d3ff0cc | |
| | d4fd3c34de | |
| | 4ac4d00377 | |
| | ec2fda7ad9 | |
| | 292f4de099 | |
| | 145a485b75 | |
| | 7e474b7a30 | |
| | af0ee9bd16 | |
| | 456ba7c498 | |
| | cd8847c53b | |
| | 1894a124ea | |
| | 490bd22b97 | |
| | f23e720a16 | |
| | 402799a584 | |
| | 0266609bf6 | |
@@ -35,3 +35,4 @@ Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.ne
## Legal Disclaimer

[Terms of Use](/TERMS_OF_SERVICE.md)
WORKSPACE (10 lines changed)
@@ -16,14 +16,12 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()

HERMETIC_CC_TOOLCHAIN_VERSION = "v2.0.0"

http_archive(
    name = "hermetic_cc_toolchain",
    sha256 = "57f03a6c29793e8add7bd64186fc8066d23b5ffd06fe9cc6b0b8c499914d3a65",
    sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
    strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
    urls = [
        "https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
        "https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
        "https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
    ],
)

@@ -174,7 +172,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe

go_rules_dependencies()

go_register_toolchains(
    go_version = "1.20.3",
    go_version = "1.20.6",
    nogo = "@//:nogo",
)
api/BUILD.bazel (new file, 8 lines)
@@ -0,0 +1,8 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["headers.go"],
    importpath = "github.com/prysmaticlabs/prysm/v4/api",
    visibility = ["//visibility:public"],
)
@@ -13,6 +13,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/grpc:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",

@@ -32,6 +33,7 @@ go_test(
    ],
    embed = [":go_default_library"],
    deps = [
        "//api:go_default_library",
        "//api/grpc:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
@@ -10,6 +10,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
)
|
||||
|
||||
@@ -116,7 +117,11 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
|
||||
// Something went wrong, but the request completed, meaning we can write headers and the error message.
|
||||
for h, vs := range resp.Header {
|
||||
for _, v := range vs {
|
||||
w.Header().Set(h, v)
|
||||
if strings.HasSuffix(h, api.VersionHeader) {
|
||||
w.Header().Set(api.VersionHeader, v)
|
||||
} else {
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Handle gRPC timeout.
|
||||
@@ -187,9 +192,11 @@ func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson
|
||||
var statusCodeHeader string
|
||||
for h, vs := range grpcResp.Header {
|
||||
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
|
||||
if strings.HasPrefix(h, "Grpc-Metadata") {
|
||||
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
|
||||
if strings.HasPrefix(h, grpc.MetadataPrefix) {
|
||||
if h == grpc.WithPrefix(grpc.HttpCodeMetadataKey) {
|
||||
statusCodeHeader = vs[0]
|
||||
} else if strings.HasSuffix(h, api.VersionHeader) {
|
||||
w.Header().Set(api.VersionHeader, vs[0])
|
||||
}
|
||||
} else {
|
||||
for _, v := range vs {
|
||||
@@ -223,7 +230,7 @@ func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.He
|
||||
// Include custom error in the error JSON.
|
||||
hasCustomError := false
|
||||
if responseHeader != nil {
|
||||
customError, ok := responseHeader["Grpc-Metadata-"+grpc.CustomErrorMetadataKey]
|
||||
customError, ok := responseHeader[grpc.WithPrefix(grpc.CustomErrorMetadataKey)]
|
||||
if ok {
|
||||
hasCustomError = true
|
||||
// Assume header has only one value and read the 0 index.
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -280,7 +281,8 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
|
||||
grpc.WithPrefix(grpc.HttpCodeMetadataKey): []string{"204"},
|
||||
grpc.WithPrefix(api.VersionHeader): []string{"capella"},
|
||||
},
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
@@ -299,6 +301,9 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "224", v[0])
|
||||
v, ok = writer.Header()["Eth-Consensus-Version"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
assert.Equal(t, "capella", v[0])
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
|
||||
})
|
||||
@@ -320,11 +325,12 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
|
||||
t.Run("GET_invalid_status_code", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
Header: http.Header{"Grpc-Metadata-Eth-Consensus-Version": []string{"capella"}},
|
||||
}
|
||||
|
||||
// Set invalid status code.
|
||||
response.Header["Grpc-Metadata-"+grpc.HttpCodeMetadataKey] = []string{"invalid"}
|
||||
response.Header[grpc.WithPrefix(grpc.HttpCodeMetadataKey)] = []string{"invalid"}
|
||||
response.Header[grpc.WithPrefix(api.VersionHeader)] = []string{"capella"}
|
||||
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
@@ -390,7 +396,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
func TestWriteError(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
responseHeader := http.Header{
|
||||
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"{\"CustomField\":\"bar\"}"},
|
||||
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"{\"CustomField\":\"bar\"}"},
|
||||
}
|
||||
errJson := &testErrorJson{
|
||||
Message: "foo",
|
||||
@@ -420,7 +426,7 @@ func TestWriteError(t *testing.T) {
|
||||
logHook := test.NewGlobal()
|
||||
|
||||
responseHeader := http.Header{
|
||||
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"invalid"},
|
||||
grpc.WithPrefix(grpc.CustomErrorMetadataKey): []string{"invalid"},
|
||||
}
|
||||
|
||||
WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)
|
||||
|
||||
@@ -6,3 +6,11 @@ const CustomErrorMetadataKey = "Custom-Error"
// HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
const HttpCodeMetadataKey = "X-Http-Code"

// MetadataPrefix is the prefix for grpc headers on metadata
const MetadataPrefix = "Grpc-Metadata"

// WithPrefix creates a new string with grpc metadata prefix
func WithPrefix(value string) string {
	return MetadataPrefix + "-" + value
}
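The new `MetadataPrefix` and `WithPrefix` helpers centralize the `Grpc-Metadata-` prefix that grpc-gateway prepends when it copies gRPC header metadata into HTTP responses. Below is a minimal round-trip sketch under that assumption; the `grpcutil` import alias and the two function names are illustrative, not part of the change:

```go
package example

import (
	"context"
	"net/http"

	grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// setCustomStatus runs inside a gRPC handler: the gateway will expose the
// metadata pair to HTTP middleware as the header "Grpc-Metadata-X-Http-Code".
func setCustomStatus(ctx context.Context) error {
	return grpc.SetHeader(ctx, metadata.Pairs(grpcutil.HttpCodeMetadataKey, "206"))
}

// statusFromHeaders runs in the HTTP middleware and matches that header with
// the shared helper instead of hand-building "Grpc-Metadata-" + key.
func statusFromHeaders(h http.Header) (string, bool) {
	vals, ok := h[grpcutil.WithPrefix(grpcutil.HttpCodeMetadataKey)]
	if !ok || len(vals) == 0 {
		return "", false
	}
	return vals[0], true
}
```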
api/headers.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package api

const (
	VersionHeader        = "Eth-Consensus-Version"
	JsonMediaType        = "application/json"
	OctetStreamMediaType = "application/octet-stream"
)
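These constants give the rest of the codebase a single source of truth for the consensus-version header and the two media types. A small illustrative handler, assuming nothing beyond the constants themselves (the endpoint and response body are made up):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/api"
)

// serveBlock is a hypothetical handler that advertises the fork version of the
// returned object and declares a JSON body using the shared constants.
func serveBlock(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set(api.VersionHeader, "capella")
	w.Header().Set("Content-Type", api.JsonMediaType)
	fmt.Fprint(w, `{"version":"capella"}`)
}

func main() {
	http.HandleFunc("/example/block", serveBlock)
	_ = http.ListenAndServe("localhost:8080", nil)
}
```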
@@ -387,7 +387,7 @@ func (s *Service) InForkchoice(root [32]byte) bool {
	return s.cfg.ForkChoiceStore.HasNode(root)
}

// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
	s.cfg.ForkChoiceStore.RLock()
@@ -250,40 +250,45 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
|
||||
slashingBalance := uint64(0)
|
||||
slashingEffectiveBalance := uint64(0)
|
||||
|
||||
for i, validator := range postState.Validators() {
|
||||
for i := 0; i < postState.NumValidators(); i++ {
|
||||
validator, err := postState.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(i))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not load validator")
|
||||
continue
|
||||
}
|
||||
bal, err := postState.BalanceAtIndex(primitives.ValidatorIndex(i))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not load validator balance")
|
||||
continue
|
||||
}
|
||||
if validator.Slashed {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
if validator.Slashed() {
|
||||
if currentEpoch < validator.ExitEpoch() {
|
||||
slashingInstances++
|
||||
slashingBalance += bal
|
||||
slashingEffectiveBalance += validator.EffectiveBalance
|
||||
slashingEffectiveBalance += validator.EffectiveBalance()
|
||||
} else {
|
||||
slashedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
|
||||
if currentEpoch < validator.ExitEpoch() {
|
||||
exitingInstances++
|
||||
exitingBalance += bal
|
||||
exitingEffectiveBalance += validator.EffectiveBalance
|
||||
exitingEffectiveBalance += validator.EffectiveBalance()
|
||||
} else {
|
||||
exitedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if currentEpoch < validator.ActivationEpoch {
|
||||
if currentEpoch < validator.ActivationEpoch() {
|
||||
pendingInstances++
|
||||
pendingBalance += bal
|
||||
continue
|
||||
}
|
||||
activeInstances++
|
||||
activeBalance += bal
|
||||
activeEffectiveBalance += validator.EffectiveBalance
|
||||
activeEffectiveBalance += validator.EffectiveBalance()
|
||||
}
|
||||
activeInstances += exitingInstances + slashingInstances
|
||||
activeBalance += exitingBalance + slashingBalance
|
||||
|
||||
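The hunk above swaps `range postState.Validators()` for indexed `ValidatorAtIndexReadOnly` access, so the metrics loop reads a few fields per validator without materializing a copy of the whole validator set each epoch. A simplified sketch of the pattern; the interfaces below are pared-down stand-ins for illustration, not Prysm's real state types:

```go
package metricsexample

// readOnlyValidator exposes only the fields the metrics loop needs.
type readOnlyValidator interface {
	Slashed() bool
	ActivationEpoch() uint64
	ExitEpoch() uint64
	EffectiveBalance() uint64
}

// stateView is a pared-down stand-in for the beacon state interface.
type stateView interface {
	NumValidators() int
	ValidatorAtIndexReadOnly(i int) (readOnlyValidator, error)
	BalanceAtIndex(i int) (uint64, error)
}

// sumActiveBalance iterates by index instead of copying the validator slice.
func sumActiveBalance(st stateView, epoch uint64) (uint64, error) {
	total := uint64(0)
	for i := 0; i < st.NumValidators(); i++ {
		v, err := st.ValidatorAtIndexReadOnly(i)
		if err != nil {
			return 0, err
		}
		// Active: already activated, not yet exited, not slashed.
		if v.Slashed() || epoch < v.ActivationEpoch() || epoch >= v.ExitEpoch() {
			continue
		}
		bal, err := st.BalanceAtIndex(i)
		if err != nil {
			return 0, err
		}
		total += bal
	}
	return total, nil
}
```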
@@ -41,7 +41,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
// postBlockProcess is called when a gossip block is received. This function performs
|
||||
// several duties most importantly informing the engine if head was updated,
|
||||
// saving the new head information to the blockchain package and database and
|
||||
// saving the new head information to the blockchain package and
|
||||
// handling attestations, slashings and similar included in the block.
|
||||
func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
|
||||
@@ -107,6 +107,12 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
|
||||
return err
|
||||
}
|
||||
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not check if block is optimistic")
|
||||
optimistic = true
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
@@ -115,6 +121,7 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: signed,
|
||||
Verified: true,
|
||||
Optimistic: optimistic,
|
||||
},
|
||||
})
|
||||
|
||||
|
||||
@@ -86,6 +86,11 @@ func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
}
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
|
||||
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -82,7 +81,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return errors.Wrap(err, "could not save post state info")
|
||||
}
|
||||
|
||||
// Apply state transition on the new block.
|
||||
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -100,7 +98,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
// Send finalized events and finalized deposits in the background
|
||||
if newFinalized {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
go s.sendNewFinalizedEvent(ctx, blockCopy, postState, finalized)
|
||||
go s.sendNewFinalizedEvent(blockCopy, postState)
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDeposits(depCtx, finalized.Root)
|
||||
@@ -170,6 +168,11 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blkRoots[i])
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not check if block is optimistic")
|
||||
optimistic = true
|
||||
}
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
@@ -178,6 +181,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
|
||||
BlockRoot: blkRoots[i],
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
Optimistic: optimistic,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -337,7 +341,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
|
||||
|
||||
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
|
||||
// event feed. It needs to be called on the background
|
||||
func (s *Service) sendNewFinalizedEvent(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState, finalized *forkchoicetypes.Checkpoint) {
|
||||
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
|
||||
isValidPayload := false
|
||||
s.headLock.RLock()
|
||||
if s.head != nil {
|
||||
|
||||
@@ -533,7 +533,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
	return [32]byte{}
}

// SetForkchoiceGenesisTime mocks the same method in the chain service
// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
	if s.ForkChoiceStore != nil {
		s.ForkChoiceStore.SetGenesisTime(timestamp)
@@ -39,6 +39,8 @@ type BlockProcessedData struct {
	SignedBlock interfaces.ReadOnlySignedBeaconBlock
	// Verified is true if the block's BLS contents have been verified.
	Verified bool
	// Optimistic is true if the block is optimistic.
	Optimistic bool
}

// ChainStartedData is the data sent with ChainStarted events.
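With the new field, a state-feed subscriber can tell whether a processed block's execution payload was still optimistically imported. A hedged sketch of such a subscriber; the notifier wiring and channel size are illustrative:

```go
package example

import (
	"log"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
)

// watchBlocks logs whether each processed block is still optimistic.
func watchBlocks(notifier statefeed.Notifier) {
	ch := make(chan *feed.Event, 1)
	sub := notifier.StateFeed().Subscribe(ch)
	defer sub.Unsubscribe()

	for ev := range ch {
		if ev.Type != statefeed.BlockProcessed {
			continue
		}
		data, ok := ev.Data.(*statefeed.BlockProcessedData)
		if !ok {
			continue
		}
		log.Printf("block %#x processed, optimistic=%v", data.BlockRoot, data.Optimistic)
	}
}
```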
@@ -297,9 +297,6 @@ func (s *Service) ExchangeTransitionConfiguration(
|
||||
}
|
||||
|
||||
func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
if !features.Get().EnableOptionalEngineMethods {
|
||||
return nil, errors.New("optional engine methods not enabled")
|
||||
}
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
|
||||
@@ -491,9 +488,6 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
|
||||
|
||||
// GetPayloadBodiesByHash returns the relevant payload bodies for the provided block hash.
|
||||
func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
|
||||
if !features.Get().EnableOptionalEngineMethods {
|
||||
return nil, errors.New("optional engine methods not enabled")
|
||||
}
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByHashV1")
|
||||
defer span.End()
|
||||
|
||||
@@ -513,9 +507,6 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash
|
||||
|
||||
// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
|
||||
func (s *Service) GetPayloadBodiesByRange(ctx context.Context, start, count uint64) ([]*pb.ExecutionPayloadBodyV1, error) {
|
||||
if !features.Get().EnableOptionalEngineMethods {
|
||||
return nil, errors.New("optional engine methods not enabled")
|
||||
}
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByRangeV1")
|
||||
defer span.End()
|
||||
|
||||
@@ -560,19 +551,7 @@ func (s *Service) ReconstructFullBlock(
|
||||
}
|
||||
|
||||
executionBlockHash := common.BytesToHash(header.BlockHash())
|
||||
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
|
||||
}
|
||||
if executionBlock == nil {
|
||||
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
|
||||
}
|
||||
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
|
||||
return nil, EmptyBlockHash
|
||||
}
|
||||
|
||||
executionBlock.Version = blindedBlock.Version()
|
||||
payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
|
||||
payload, err := s.retrievePayloadFromExecutionHash(ctx, executionBlockHash, header, blindedBlock.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -619,32 +598,9 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
executionHashes = append(executionHashes, executionBlockHash)
|
||||
}
|
||||
}
|
||||
execBlocks, err := s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
|
||||
fullBlocks, err := s.retrievePayloadsFromExecutionHashes(ctx, executionHashes, validExecPayloads, blindedBlocks)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
|
||||
}
|
||||
|
||||
// For each valid payload, we reconstruct the full block from it with the
|
||||
// blinded block.
|
||||
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
|
||||
for sliceIdx, realIdx := range validExecPayloads {
|
||||
b := execBlocks[sliceIdx]
|
||||
if b == nil {
|
||||
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
|
||||
}
|
||||
header, err := blindedBlocks[realIdx].Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err := fullPayloadFromExecutionBlock(header, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlocks[realIdx] = fullBlock
|
||||
return nil, err
|
||||
}
|
||||
// For blocks that are pre-merge we simply reconstruct them via an empty
|
||||
// execution payload.
|
||||
@@ -660,6 +616,95 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
return fullBlocks, nil
|
||||
}
|
||||
|
||||
func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executionBlockHash common.Hash, header interfaces.ExecutionData, version int) (interfaces.ExecutionData, error) {
|
||||
if features.Get().EnableOptionalEngineMethods {
|
||||
pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err)
|
||||
}
|
||||
if len(pBodies) != 1 {
|
||||
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies))
|
||||
}
|
||||
bdy := pBodies[0]
|
||||
return fullPayloadFromPayloadBody(header, bdy, version)
|
||||
}
|
||||
|
||||
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
|
||||
}
|
||||
if executionBlock == nil {
|
||||
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
|
||||
}
|
||||
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
|
||||
return nil, EmptyBlockHash
|
||||
}
|
||||
|
||||
executionBlock.Version = version
|
||||
return fullPayloadFromExecutionBlock(header, executionBlock)
|
||||
}
|
||||
|
||||
func (s *Service) retrievePayloadsFromExecutionHashes(
|
||||
ctx context.Context,
|
||||
executionHashes []common.Hash,
|
||||
validExecPayloads []int,
|
||||
blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
|
||||
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
|
||||
var execBlocks []*pb.ExecutionBlock
|
||||
var payloadBodies []*pb.ExecutionPayloadBodyV1
|
||||
var err error
|
||||
if features.Get().EnableOptionalEngineMethods {
|
||||
payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err)
|
||||
}
|
||||
} else {
|
||||
execBlocks, err = s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
|
||||
}
|
||||
}
|
||||
|
||||
// For each valid payload, we reconstruct the full block from it with the
|
||||
// blinded block.
|
||||
for sliceIdx, realIdx := range validExecPayloads {
|
||||
var payload interfaces.ExecutionData
|
||||
if features.Get().EnableOptionalEngineMethods {
|
||||
b := payloadBodies[sliceIdx]
|
||||
if b == nil {
|
||||
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
|
||||
}
|
||||
header, err := blindedBlocks[realIdx].Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err = fullPayloadFromPayloadBody(header, b, blindedBlocks[realIdx].Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
b := execBlocks[sliceIdx]
|
||||
if b == nil {
|
||||
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
|
||||
}
|
||||
header, err := blindedBlocks[realIdx].Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err = fullPayloadFromExecutionBlock(header, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlocks[realIdx] = fullBlock
|
||||
}
|
||||
return fullBlocks, nil
|
||||
}
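In the batch path above, `validExecPayloads` remembers which positions in `blindedBlocks` actually carry an execution payload (pre-merge blocks are skipped), so `sliceIdx` walks the engine response while `realIdx` walks the original block slice. A toy illustration of that index mapping with made-up data:

```go
package main

import "fmt"

func main() {
	// Suppose blindedBlocks has four entries and entries 0 and 2 are pre-merge,
	// so only blocks 1 and 3 contribute execution hashes to the engine request.
	validExecPayloads := []int{1, 3} // realIdx values, in request order

	// The engine returns one payload body per requested hash, in request order,
	// so response position sliceIdx fills fullBlocks[realIdx].
	for sliceIdx, realIdx := range validExecPayloads {
		fmt.Printf("payload body %d fills fullBlocks[%d]\n", sliceIdx, realIdx)
	}
}
```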
|
||||
|
||||
func fullPayloadFromExecutionBlock(
|
||||
header interfaces.ExecutionData, block *pb.ExecutionBlock,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
@@ -721,6 +766,50 @@ func fullPayloadFromExecutionBlock(
|
||||
}, 0) // We can't get the block value and don't care about the block value for this instance
|
||||
}
|
||||
|
||||
func fullPayloadFromPayloadBody(
|
||||
header interfaces.ExecutionData, body *pb.ExecutionPayloadBodyV1, bVersion int,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
if header.IsNil() || body == nil {
|
||||
return nil, errors.New("execution block and header cannot be nil")
|
||||
}
|
||||
|
||||
if bVersion == version.Bellatrix {
|
||||
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: header.BlockHash(),
|
||||
Transactions: body.Transactions,
|
||||
})
|
||||
}
|
||||
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: header.BlockHash(),
|
||||
Transactions: body.Transactions,
|
||||
Withdrawals: body.Withdrawals,
|
||||
}, 0) // We can't get the block value and don't care about the block value for this instance
|
||||
}
|
||||
|
||||
// Handles errors received from the RPC server according to the specification.
|
||||
func handleRPCError(err error) error {
|
||||
if err == nil {
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
@@ -35,6 +36,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
|
||||
@@ -19,13 +20,6 @@ import (
|
||||
"github.com/r3labs/sse"
|
||||
)
|
||||
|
||||
const (
|
||||
versionHeader = "Eth-Consensus-Version"
|
||||
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
|
||||
jsonMediaType = "application/json"
|
||||
octetStreamMediaType = "application/octet-stream"
|
||||
)
|
||||
|
||||
// match a number with optional decimals
|
||||
var priorityRegex = regexp.MustCompile(`q=(\d+(?:\.\d+)?)`)
|
||||
|
||||
@@ -223,7 +217,7 @@ func sszRequested(req *http.Request) (bool, error) {
|
||||
for _, t := range types {
|
||||
values := strings.Split(t, ";")
|
||||
name := values[0]
|
||||
if name != jsonMediaType && name != octetStreamMediaType {
|
||||
if name != api.JsonMediaType && name != api.OctetStreamMediaType {
|
||||
continue
|
||||
}
|
||||
// no params specified
|
||||
@@ -248,7 +242,7 @@ func sszRequested(req *http.Request) (bool, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return currentType == octetStreamMediaType, nil
|
||||
return currentType == api.OctetStreamMediaType, nil
|
||||
}
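`sszRequested` serves SSZ when `application/octet-stream` carries the highest priority among the acceptable types, where a missing `q` parameter counts as 1.0. A toy illustration of the q-value comparison (tie-breaking by listing order is omitted here):

```go
package main

import "fmt"

// pickType returns the media type with the higher q-value; a missing q defaults
// to 1.0. Illustration only, not the middleware's actual Accept-header parser.
func pickType(qJSON, qSSZ float64) string {
	if qSSZ > qJSON {
		return "application/octet-stream"
	}
	return "application/json"
}

func main() {
	// Accept: application/json;q=0.9,application/octet-stream  -> SSZ (1.0 vs 0.9)
	fmt.Println(pickType(0.9, 1.0))
	// Accept: application/json,application/octet-stream;q=0.9  -> JSON (1.0 vs 0.9)
	fmt.Println(pickType(1.0, 0.9))
}
```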
|
||||
|
||||
func sszPosted(req *http.Request) bool {
|
||||
@@ -259,7 +253,7 @@ func sszPosted(req *http.Request) bool {
|
||||
if len(ct) != 1 {
|
||||
return false
|
||||
}
|
||||
return ct[0] == octetStreamMediaType
|
||||
return ct[0] == api.OctetStreamMediaType
|
||||
}
|
||||
|
||||
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
|
||||
@@ -278,10 +272,10 @@ func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint
|
||||
}
|
||||
|
||||
func prepareCustomHeaders(req *http.Request) {
|
||||
ver := req.Header.Get(versionHeader)
|
||||
ver := req.Header.Get(api.VersionHeader)
|
||||
if ver != "" {
|
||||
req.Header.Del(versionHeader)
|
||||
req.Header.Add(grpcVersionHeader, ver)
|
||||
req.Header.Del(api.VersionHeader)
|
||||
req.Header.Add(grpc.WithPrefix(api.VersionHeader), ver)
|
||||
}
|
||||
}
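`prepareCustomHeaders` moves a client-supplied `Eth-Consensus-Version` header into the `Grpc-Metadata-` namespace before the request is proxied, so grpc-gateway forwards it to the gRPC server as metadata. A test-style sketch of the observable effect, assuming it lives in the same package as `prepareCustomHeaders` and uses the repository's `testing/assert` helpers; the request path is made up:

```go
func TestPrepareCustomHeaders_VersionForwarded(t *testing.T) {
	req := httptest.NewRequest(http.MethodPost, "http://example/eth/v2/beacon/blocks", nil)
	req.Header.Set(api.VersionHeader, "capella")

	prepareCustomHeaders(req)

	// The plain header is removed and re-added under the Grpc-Metadata- prefix.
	assert.Equal(t, "", req.Header.Get(api.VersionHeader))
	assert.Equal(t, "capella", req.Header.Get(grpc.WithPrefix(api.VersionHeader)))
}
```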
|
||||
|
||||
@@ -297,7 +291,7 @@ func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
|
||||
}
|
||||
req.Body = io.NopCloser(bytes.NewBuffer(data))
|
||||
req.ContentLength = int64(len(data))
|
||||
req.Header.Set("Content-Type", jsonMediaType)
|
||||
req.Header.Set("Content-Type", api.JsonMediaType)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -325,9 +319,9 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
|
||||
w.Header().Set("Content-Type", octetStreamMediaType)
|
||||
w.Header().Set("Content-Type", api.OctetStreamMediaType)
|
||||
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
|
||||
w.Header().Set(versionHeader, respVersion)
|
||||
w.Header().Set(api.VersionHeader, respVersion)
|
||||
if statusCodeHeader != "" {
|
||||
code, err := strconv.Atoi(statusCodeHeader)
|
||||
if err != nil {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
|
||||
@@ -45,7 +46,7 @@ func (t testSSZResponseJson) SSZFinalized() bool {
|
||||
func TestSSZRequested(t *testing.T) {
|
||||
t.Run("ssz_requested", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example", nil)
|
||||
request.Header["Accept"] = []string{octetStreamMediaType}
|
||||
request.Header["Accept"] = []string{api.OctetStreamMediaType}
|
||||
result, err := sszRequested(request)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, result)
|
||||
@@ -53,7 +54,7 @@ func TestSSZRequested(t *testing.T) {
|
||||
|
||||
t.Run("ssz_content_type_first", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example", nil)
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", octetStreamMediaType, jsonMediaType)}
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", api.OctetStreamMediaType, api.JsonMediaType)}
|
||||
result, err := sszRequested(request)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, result)
|
||||
@@ -61,7 +62,7 @@ func TestSSZRequested(t *testing.T) {
|
||||
|
||||
t.Run("ssz_content_type_preferred_1", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example", nil)
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", jsonMediaType, octetStreamMediaType)}
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", api.JsonMediaType, api.OctetStreamMediaType)}
|
||||
result, err := sszRequested(request)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, result)
|
||||
@@ -69,7 +70,7 @@ func TestSSZRequested(t *testing.T) {
|
||||
|
||||
t.Run("ssz_content_type_preferred_2", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example", nil)
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", octetStreamMediaType, jsonMediaType)}
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", api.OctetStreamMediaType, api.JsonMediaType)}
|
||||
result, err := sszRequested(request)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, result)
|
||||
@@ -77,7 +78,7 @@ func TestSSZRequested(t *testing.T) {
|
||||
|
||||
t.Run("other_content_type_preferred", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example", nil)
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", jsonMediaType, octetStreamMediaType)}
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", api.JsonMediaType, api.OctetStreamMediaType)}
|
||||
result, err := sszRequested(request)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, result)
|
||||
@@ -85,7 +86,7 @@ func TestSSZRequested(t *testing.T) {
|
||||
|
||||
t.Run("other_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example", nil)
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", jsonMediaType, octetStreamMediaType)}
|
||||
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", api.JsonMediaType, api.OctetStreamMediaType)}
|
||||
result, err := sszRequested(request)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, result)
|
||||
@@ -153,7 +154,7 @@ func TestPreparePostedSszData(t *testing.T) {
|
||||
|
||||
preparePostedSSZData(request)
|
||||
assert.Equal(t, int64(19), request.ContentLength)
|
||||
assert.Equal(t, jsonMediaType, request.Header.Get("Content-Type"))
|
||||
assert.Equal(t, api.JsonMediaType, request.Header.Get("Content-Type"))
|
||||
}
|
||||
|
||||
func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
|
||||
@@ -209,12 +210,12 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
|
||||
v, ok = writer.Header()["Content-Type"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, octetStreamMediaType, v[0])
|
||||
assert.Equal(t, api.OctetStreamMediaType, v[0])
|
||||
v, ok = writer.Header()["Content-Disposition"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "attachment; filename=test.ssz", v[0])
|
||||
v, ok = writer.Header()[versionHeader]
|
||||
v, ok = writer.Header()[api.VersionHeader]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "version", v[0])
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
@@ -67,6 +68,7 @@ go_library(
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
@@ -91,6 +93,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -17,7 +18,9 @@ import (
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -38,7 +41,9 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
|
||||
}
|
||||
result, err := getBlindedBlockPhase0(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
@@ -196,11 +201,11 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon
|
||||
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+api.VersionHeader+" header")
|
||||
}
|
||||
ver := md.Get(versionHeader)
|
||||
ver := md.Get(api.VersionHeader)
|
||||
if len(ver) == 0 {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+api.VersionHeader+" header")
|
||||
}
|
||||
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
forkVer, err := schedule.VersionForName(ver[0])
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
@@ -17,11 +19,13 @@ import (
|
||||
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func TestServer_GetBlindedBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
stream := &runtime.ServerTransportStream{}
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
@@ -321,7 +325,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "phase0")
|
||||
md.Set(api.VersionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -342,7 +346,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "altair")
|
||||
md.Set(api.VersionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -363,7 +367,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -381,7 +385,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
@@ -402,7 +406,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "capella")
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -420,7 +424,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "capella")
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
@@ -25,16 +26,16 @@ import (
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
const versionHeader = "eth-consensus-version"
|
||||
|
||||
var (
|
||||
errNilBlock = errors.New("nil block")
|
||||
)
|
||||
@@ -253,11 +254,11 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
|
||||
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
|
||||
}
|
||||
ver := md.Get(versionHeader)
|
||||
ver := md.Get(api.VersionHeader)
|
||||
if len(ver) == 0 {
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
|
||||
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+api.VersionHeader+" header")
|
||||
}
|
||||
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
forkVer, err := schedule.VersionForName(ver[0])
|
||||
@@ -424,6 +425,9 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
|
||||
}
|
||||
result, err = getBlockAltair(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
|
||||
@@ -5,7 +5,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
@@ -24,6 +26,7 @@ import (
|
||||
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
@@ -444,7 +447,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "phase0")
|
||||
md.Set(api.VersionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -465,7 +468,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "altair")
|
||||
md.Set(api.VersionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -486,7 +489,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -504,7 +507,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
md.Set(api.VersionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
@@ -525,7 +528,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "capella")
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err)
|
||||
@@ -543,7 +546,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
|
||||
Data: ssz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "capella")
|
||||
md.Set(api.VersionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NotNil(t, err)
|
||||
@@ -579,8 +582,8 @@ func TestServer_GetBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServer_GetBlockV2(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stream := &runtime.ServerTransportStream{}
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
|
||||
@@ -15,6 +15,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -38,7 +41,142 @@ func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request)
|
||||
if ok := bs.checkSync(r.Context(), w); !ok {
|
||||
return
|
||||
}
|
||||
isSSZ, err := network.SszRequested(r)
|
||||
if isSSZ && err == nil {
|
||||
publishBlindedBlockV2SSZ(bs, w, r)
|
||||
} else {
|
||||
publishBlindedBlockV2(bs, w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not read request body: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
capellaBlock := ðpbv2.SignedBlindedBeaconBlockCapella{}
|
||||
if err := capellaBlock.UnmarshalSSZ(body); err == nil {
|
||||
v1block, err := migration.BlindedCapellaToV1Alpha1SignedBlock(capellaBlock)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_BlindedCapella{
|
||||
BlindedCapella: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
bellatrixBlock := ðpbv2.SignedBlindedBeaconBlockBellatrix{}
|
||||
if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
|
||||
v1block, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(bellatrixBlock)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_BlindedBellatrix{
|
||||
BlindedBellatrix: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
|
||||
// blinded is not supported before bellatrix hardfork
|
||||
altairBlock := ðpbv2.SignedBeaconBlockAltair{}
|
||||
if err := altairBlock.UnmarshalSSZ(body); err == nil {
|
||||
v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Altair{
|
||||
Altair: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
phase0Block := ðpbv1.SignedBeaconBlock{}
|
||||
if err := phase0Block.UnmarshalSSZ(body); err == nil {
|
||||
v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Phase0{
|
||||
Phase0: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Body does not represent a valid block type",
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
}
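`publishBlindedBlockV2SSZ` tries each fork's SSZ type from newest (Capella) to oldest (phase0) and proceeds with the first body that unmarshals, wrapping it into a `GenericSignedBeaconBlock` for broadcast. A condensed sketch of that cascade; `decodeAttempt` and `decodeFirstMatch` are hypothetical helpers, not part of the handler:

```go
package example

import (
	"errors"

	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// decodeAttempt tries to turn the raw SSZ body into a generic signed block for one fork.
type decodeAttempt func(body []byte) (*eth.GenericSignedBeaconBlock, error)

// decodeFirstMatch walks the attempts newest-first and returns the first success.
// The real handler inlines this logic, pairing each fork's UnmarshalSSZ with its
// migration function before calling validateBroadcast and proposeBlock.
func decodeFirstMatch(body []byte, attempts []decodeAttempt) (*eth.GenericSignedBeaconBlock, error) {
	for _, try := range attempts {
		if blk, err := try(body); err == nil {
			return blk, nil
		}
	}
	return nil, errors.New("body does not represent a valid block type")
}
```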
|
||||
|
||||
func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
|
||||
validate := validator.New()
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
@@ -167,7 +305,15 @@ func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
|
||||
if ok := bs.checkSync(r.Context(), w); !ok {
|
||||
return
|
||||
}
|
||||
isSSZ, err := network.SszRequested(r)
|
||||
if isSSZ && err == nil {
|
||||
publishBlockV2SSZ(bs, w, r)
|
||||
} else {
|
||||
publishBlockV2(bs, w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
|
||||
validate := validator.New()
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
@@ -178,7 +324,140 @@ func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
capellaBlock := ðpbv2.SignedBeaconBlockCapella{}
|
||||
if err := capellaBlock.UnmarshalSSZ(body); err == nil {
|
||||
if err = validate.Struct(capellaBlock); err == nil {
|
||||
v1block, err := migration.CapellaToV1Alpha1SignedBlock(capellaBlock)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Capella{
|
||||
Capella: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
bellatrixBlock := ðpbv2.SignedBeaconBlockBellatrix{}
|
||||
if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
|
||||
if err = validate.Struct(bellatrixBlock); err == nil {
|
||||
v1block, err := migration.BellatrixToV1Alpha1SignedBlock(bellatrixBlock)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Bellatrix{
|
||||
Bellatrix: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
altairBlock := ðpbv2.SignedBeaconBlockAltair{}
|
||||
if err := altairBlock.UnmarshalSSZ(body); err == nil {
|
||||
if err = validate.Struct(altairBlock); err == nil {
|
||||
v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Altair{
|
||||
Altair: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
phase0Block := ðpbv1.SignedBeaconBlock{}
|
||||
if err := phase0Block.UnmarshalSSZ(body); err == nil {
|
||||
if err = validate.Struct(phase0Block); err == nil {
|
||||
v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
genericBlock := ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Phase0{
|
||||
Phase0: v1block,
|
||||
},
|
||||
}
|
||||
if err = bs.validateBroadcast(r, genericBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
}
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
}
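
publishBlockV2SSZ above tries each fork's SSZ type from newest to oldest and proposes the first payload that both unmarshals and validates. The sketch below condenses that fall-through into one helper using the same types; it is illustrative only and omits the per-fork error responses the handler writes.

// decodeSSZBlockSketch condenses the fall-through used above: attempt the
// newest fork first and stop at the first body that unmarshals and validates.
// Illustrative only; errors.New is assumed to come from the package the file
// already imports.
func decodeSSZBlockSketch(body []byte, validate *validator.Validate) (*eth.GenericSignedBeaconBlock, error) {
	capella := &ethpbv2.SignedBeaconBlockCapella{}
	if err := capella.UnmarshalSSZ(body); err == nil && validate.Struct(capella) == nil {
		v1block, err := migration.CapellaToV1Alpha1SignedBlock(capella)
		if err != nil {
			return nil, err
		}
		return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_Capella{Capella: v1block}}, nil
	}
	bellatrix := &ethpbv2.SignedBeaconBlockBellatrix{}
	if err := bellatrix.UnmarshalSSZ(body); err == nil && validate.Struct(bellatrix) == nil {
		v1block, err := migration.BellatrixToV1Alpha1SignedBlock(bellatrix)
		if err != nil {
			return nil, err
		}
		return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_Bellatrix{Bellatrix: v1block}}, nil
	}
	// ...Altair and Phase 0 follow the same shape...
	return nil, errors.New("body does not represent a valid block type")
}
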
|
||||
|
||||
func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
|
||||
validate := validator.New()
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not read request body",
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
var capellaBlock *SignedBeaconBlockCapella
|
||||
if err = unmarshalStrict(body, &capellaBlock); err == nil {
|
||||
if err = validate.Struct(capellaBlock); err == nil {
|
||||
|
||||
@@ -2,6 +2,7 @@ package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
@@ -30,7 +32,7 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(phase0Block)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(phase0Block)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
@@ -47,7 +49,7 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(altairBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(altairBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
@@ -64,7 +66,7 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
@@ -81,7 +83,7 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(capellaBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(capellaBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
@@ -92,7 +94,7 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
@@ -108,7 +110,7 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte("foo")))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
@@ -117,9 +119,74 @@ func TestPublishBlockV2(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
func TestPublishBlockV2SSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Bellatrix)
|
||||
return ok
|
||||
}))
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
var bellablock SignedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(bellatrixBlock), &bellablock)
|
||||
require.NoError(t, err)
|
||||
genericBlock, err := bellablock.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
sszvalue, err := genericBlock.GetBellatrix().MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Capella)
|
||||
return ok
|
||||
}))
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var cblock SignedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(capellaBlock), &cblock)
|
||||
require.NoError(t, err)
|
||||
genericBlock, err := cblock.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
sszvalue, err := genericBlock.GetCapella().MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
})
|
||||
t.Run("invalid block", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Body does not represent a valid block type"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
@@ -131,7 +198,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(phase0Block)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(phase0Block)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
@@ -148,7 +215,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(altairBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(altairBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
@@ -165,7 +232,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedBellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
@@ -182,7 +249,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(blindedCapellaBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(blindedCapellaBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
@@ -193,7 +260,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
@@ -209,7 +276,7 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.example", bytes.NewReader([]byte("foo")))
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
@@ -218,6 +285,72 @@ func TestPublishBlindedBlockV2(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestPublishBlindedBlockV2SSZ(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedBellatrix)
|
||||
return ok
|
||||
}))
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var bellablock SignedBlindedBeaconBlockBellatrix
|
||||
err := json.Unmarshal([]byte(blindedBellatrixBlock), &bellablock)
|
||||
require.NoError(t, err)
|
||||
genericBlock, err := bellablock.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
sszvalue, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
|
||||
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
|
||||
_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedCapella)
|
||||
return ok
|
||||
}))
|
||||
server := &Server{
|
||||
V1Alpha1ValidatorServer: v1alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var cblock SignedBlindedBeaconBlockCapella
|
||||
err := json.Unmarshal([]byte(blindedCapellaBlock), &cblock)
|
||||
require.NoError(t, err)
|
||||
genericBlock, err := cblock.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
sszvalue, err := genericBlock.GetBlindedCapella().MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(sszvalue))
|
||||
request.Header.Set("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
})
|
||||
t.Run("invalid block", func(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(bellatrixBlock)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlindedBlockV2(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Body does not represent a valid block type"))
|
||||
})
|
||||
}
|
||||
|
||||
const (
|
||||
phase0Block = `{
|
||||
"message": {
|
||||
@@ -668,7 +801,8 @@ const (
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
],
|
||||
"data": {
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
@@ -688,7 +822,7 @@ const (
|
||||
}
|
||||
],
|
||||
"sync_aggregate": {
|
||||
"sync_committee_bits": "0x01",
|
||||
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
},
|
||||
"execution_payload": {
|
||||
@@ -846,7 +980,8 @@ const (
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
],
|
||||
"data": {
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
@@ -866,7 +1001,7 @@ const (
|
||||
}
|
||||
],
|
||||
"sync_aggregate": {
|
||||
"sync_committee_bits": "0x01",
|
||||
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
},
|
||||
"execution_payload_header": {
|
||||
@@ -1022,7 +1157,8 @@ const (
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
],
|
||||
"data": {
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
@@ -1042,7 +1178,7 @@ const (
|
||||
}
|
||||
],
|
||||
"sync_aggregate": {
|
||||
"sync_committee_bits": "0x01",
|
||||
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
},
|
||||
"execution_payload": {
|
||||
@@ -1218,7 +1354,8 @@ const (
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
],
|
||||
"data": {
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
@@ -1238,7 +1375,7 @@ const (
|
||||
}
|
||||
],
|
||||
"sync_aggregate": {
|
||||
"sync_committee_bits": "0x01",
|
||||
"sync_committee_bits": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sync_committee_signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
},
|
||||
"execution_payload_header": {
|
||||
|
||||
@@ -11,7 +11,6 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -42,7 +41,6 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
gwpb "github.com/grpc-ecosystem/grpc-gateway/v2/proto/gateway"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
@@ -80,26 +79,18 @@ func (s *Server) StreamEvents(
|
||||
}
|
||||
|
||||
// Subscribe to event feeds from information received in the beacon node runtime.
|
||||
blockChan := make(chan *feed.Event, 1)
|
||||
blockSub := s.BlockNotifier.BlockFeed().Subscribe(blockChan)
|
||||
|
||||
opsChan := make(chan *feed.Event, 1)
|
||||
opsSub := s.OperationNotifier.OperationFeed().Subscribe(opsChan)
|
||||
|
||||
stateChan := make(chan *feed.Event, 1)
|
||||
stateSub := s.StateNotifier.StateFeed().Subscribe(stateChan)
|
||||
|
||||
defer blockSub.Unsubscribe()
|
||||
defer opsSub.Unsubscribe()
|
||||
defer stateSub.Unsubscribe()
|
||||
|
||||
// Handle each event received and context cancelation.
|
||||
for {
|
||||
select {
|
||||
case event := <-blockChan:
|
||||
if err := handleBlockEvents(stream, requestedTopics, event); err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not handle block event: %v", err)
|
||||
}
|
||||
case event := <-opsChan:
|
||||
if err := handleBlockOperationEvents(stream, requestedTopics, event); err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not handle block operations event: %v", err)
|
||||
@@ -116,37 +107,6 @@ func (s *Server) StreamEvents(
|
||||
}
|
||||
}
|
||||
|
||||
func handleBlockEvents(
|
||||
stream ethpbservice.Events_StreamEventsServer, requestedTopics map[string]bool, event *feed.Event,
|
||||
) error {
|
||||
switch event.Type {
|
||||
case blockfeed.ReceivedBlock:
|
||||
if _, ok := requestedTopics[BlockTopic]; !ok {
|
||||
return nil
|
||||
}
|
||||
blkData, ok := event.Data.(*blockfeed.ReceivedBlockData)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
v1Data, err := migration.BlockIfaceToV1BlockHeader(blkData.SignedBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
item, err := v1Data.Message.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash tree root block")
|
||||
}
|
||||
eventBlock := ðpb.EventBlock{
|
||||
Slot: v1Data.Message.Slot,
|
||||
Block: item[:],
|
||||
ExecutionOptimistic: blkData.IsOptimistic,
|
||||
}
|
||||
return streamData(stream, BlockTopic, eventBlock)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func handleBlockOperationEvents(
|
||||
stream ethpbservice.Events_StreamEventsServer, requestedTopics map[string]bool, event *feed.Event,
|
||||
) error {
|
||||
@@ -252,6 +212,28 @@ func (s *Server) handleStateEvents(
return nil
}
return streamData(stream, ChainReorgTopic, reorg)
case statefeed.BlockProcessed:
if _, ok := requestedTopics[BlockTopic]; !ok {
return nil
}
blkData, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
return nil
}
v1Data, err := migration.BlockIfaceToV1BlockHeader(blkData.SignedBlock)
if err != nil {
return err
}
item, err := v1Data.Message.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash tree root block")
}
eventBlock := &ethpb.EventBlock{
Slot: blkData.Slot,
Block: item[:],
ExecutionOptimistic: blkData.Optimistic,
}
return streamData(stream, BlockTopic, eventBlock)
default:
return nil
}
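
With the dedicated block feed handler removed, the stream's block topic is now served from the state feed's BlockProcessed event, as in the case added above. A minimal consumer of that feed, using only types that appear in this change, might look like the sketch below; it is illustrative, assumes a logrus-style log package, and ignores the other topics the real server multiplexes.

// watchProcessedBlocksSketch subscribes to the state feed and logs each
// processed block's slot and optimistic status. Sketch only: notifier is any
// statefeed.Notifier such as the server's StateNotifier.
func watchProcessedBlocksSketch(ctx context.Context, notifier statefeed.Notifier) {
	stateChan := make(chan *feed.Event, 1)
	sub := notifier.StateFeed().Subscribe(stateChan)
	defer sub.Unsubscribe()

	for {
		select {
		case event := <-stateChan:
			if event.Type != statefeed.BlockProcessed {
				continue
			}
			data, ok := event.Data.(*statefeed.BlockProcessedData)
			if !ok {
				continue
			}
			log.Infof("block processed: slot=%d optimistic=%v", data.Slot, data.Optimistic)
		case <-ctx.Done():
			return
		}
	}
}
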
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
@@ -51,55 +50,6 @@ func TestStreamEvents_Preconditions(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestStreamEvents_BlockEvents(t *testing.T) {
|
||||
t.Run(BlockTopic, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv, ctrl, mockStream := setupServer(ctx, t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
blk := util.HydrateSignedBeaconBlock(ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Slot: 8,
|
||||
},
|
||||
})
|
||||
bodyRoot, err := blk.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wantedHeader := util.HydrateBeaconHeader(ð.BeaconBlockHeader{
|
||||
Slot: 8,
|
||||
BodyRoot: bodyRoot[:],
|
||||
})
|
||||
wantedBlockRoot, err := wantedHeader.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
genericResponse, err := anypb.New(ðpb.EventBlock{
|
||||
Slot: 8,
|
||||
Block: wantedBlockRoot[:],
|
||||
ExecutionOptimistic: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
wantedMessage := &gateway.EventSource{
|
||||
Event: BlockTopic,
|
||||
Data: genericResponse,
|
||||
}
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
assertFeedSendAndReceive(ctx, &assertFeedArgs{
|
||||
t: t,
|
||||
srv: srv,
|
||||
topics: []string{BlockTopic},
|
||||
stream: mockStream,
|
||||
shouldReceive: wantedMessage,
|
||||
itemToSend: &feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{
|
||||
SignedBlock: wsb,
|
||||
IsOptimistic: true,
|
||||
},
|
||||
},
|
||||
feed: srv.BlockNotifier.BlockFeed(),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
t.Run("attestation_unaggregated", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
@@ -588,6 +538,53 @@ func TestStreamEvents_StateEvents(t *testing.T) {
|
||||
feed: srv.StateNotifier.StateFeed(),
|
||||
})
|
||||
})
|
||||
t.Run(BlockTopic, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv, ctrl, mockStream := setupServer(ctx, t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
blk := util.HydrateSignedBeaconBlock(ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Slot: 8,
|
||||
},
|
||||
})
|
||||
bodyRoot, err := blk.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wantedHeader := util.HydrateBeaconHeader(ð.BeaconBlockHeader{
|
||||
Slot: 8,
|
||||
BodyRoot: bodyRoot[:],
|
||||
})
|
||||
wantedBlockRoot, err := wantedHeader.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
genericResponse, err := anypb.New(ðpb.EventBlock{
|
||||
Slot: 8,
|
||||
Block: wantedBlockRoot[:],
|
||||
ExecutionOptimistic: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
wantedMessage := &gateway.EventSource{
|
||||
Event: BlockTopic,
|
||||
Data: genericResponse,
|
||||
}
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
assertFeedSendAndReceive(ctx, &assertFeedArgs{
|
||||
t: t,
|
||||
srv: srv,
|
||||
topics: []string{BlockTopic},
|
||||
stream: mockStream,
|
||||
shouldReceive: wantedMessage,
|
||||
itemToSend: &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: 8,
|
||||
SignedBlock: wsb,
|
||||
Optimistic: true,
|
||||
},
|
||||
},
|
||||
feed: srv.StateNotifier.StateFeed(),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestStreamEvents_CommaSeparatedTopics(t *testing.T) {
|
||||
@@ -651,7 +648,6 @@ func TestStreamEvents_CommaSeparatedTopics(t *testing.T) {
|
||||
|
||||
func setupServer(ctx context.Context, t testing.TB) (*Server, *gomock.Controller, *mock.MockEvents_StreamEventsServer) {
|
||||
srv := &Server{
|
||||
BlockNotifier: &mockChain.MockBlockNotifier{},
|
||||
StateNotifier: &mockChain.MockStateNotifier{},
|
||||
OperationNotifier: &mockChain.MockOperationNotifier{},
|
||||
Ctx: ctx,
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
|
||||
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
)
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
type Server struct {
|
||||
Ctx context.Context
|
||||
StateNotifier statefeed.Notifier
|
||||
BlockNotifier blockfeed.Notifier
|
||||
OperationNotifier opfeed.Notifier
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
|
||||
@@ -38,6 +38,7 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -226,6 +226,123 @@ func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
|
||||
network.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
// SyncCommitteeRewards retrieves rewards info for sync committee members specified by array of public keys or validator index.
|
||||
// If no array is provided, return reward info for every committee member.
|
||||
func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
blockId := segments[len(segments)-1]
|
||||
|
||||
blk, err := s.Blocker.Block(r.Context(), []byte(blockId))
|
||||
if errJson := handleGetBlockError(blk, err); errJson != nil {
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
if blk.Version() == version.Phase0 {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Sync committee rewards are not supported for Phase 0",
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get state: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
sa, err := blk.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get sync aggregate: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
vals, valIndices, ok := syncRewardsVals(w, r, st)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
preProcessBals := make([]uint64, len(vals))
|
||||
for i, valIdx := range valIndices {
|
||||
preProcessBals[i], err = st.BalanceAtIndex(valIdx)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get validator's balance: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
_, proposerReward, err := altair.ProcessSyncAggregate(r.Context(), st, sa)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get sync aggregate rewards: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
rewards := make([]int, len(preProcessBals))
|
||||
proposerIndex := blk.Block().ProposerIndex()
|
||||
for i, valIdx := range valIndices {
|
||||
bal, err := st.BalanceAtIndex(valIdx)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get validator's balance: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
rewards[i] = int(bal - preProcessBals[i]) // lint:ignore uintcast
|
||||
if valIdx == proposerIndex {
|
||||
rewards[i] = rewards[i] - int(proposerReward) // lint:ignore uintcast
|
||||
}
|
||||
}
|
||||
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get optimistic mode info: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get block root: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
scRewards := make([]SyncCommitteeReward, len(valIndices))
|
||||
for i, valIdx := range valIndices {
|
||||
scRewards[i] = SyncCommitteeReward{
|
||||
ValidatorIndex: strconv.FormatUint(uint64(valIdx), 10),
|
||||
Reward: strconv.Itoa(rewards[i]),
|
||||
}
|
||||
}
|
||||
response := &SyncCommitteeRewardsResponse{
|
||||
Data: scRewards,
|
||||
ExecutionOptimistic: optimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
|
||||
}
|
||||
network.WriteJson(w, response)
|
||||
}
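
The loop above reports, for each requested committee member, the balance gained across ProcessSyncAggregate, and subtracts the proposer reward when that member also proposed the block, so only the sync committee component is returned. A small worked example with made-up numbers:

// Worked example in Gwei, mirroring the reward loop above; all values are
// illustrative, not taken from the implementation.
balanceBefore := uint64(32_000_000_000)
balanceAfter := uint64(32_000_003_945) // balance after ProcessSyncAggregate
proposerReward := uint64(3_247)        // hypothetical value returned by ProcessSyncAggregate
reward := int(balanceAfter - balanceBefore) // 3945
isProposer := true                          // this member also proposed the block
if isProposer {
	reward -= int(proposerReward) // 3945 - 3247 = 698, the sync committee component only
}
fmt.Println(reward) // 698
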
|
||||
|
||||
func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.BeaconState, bool) {
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
requestedEpoch, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
|
||||
@@ -298,57 +415,9 @@ func attRewardsBalancesAndVals(
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, nil, false
|
||||
}
|
||||
var rawValIds []string
|
||||
if r.Body != http.NoBody {
|
||||
if err = json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode validators: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, nil, false
|
||||
}
|
||||
}
|
||||
valIndices := make([]primitives.ValidatorIndex, len(rawValIds))
|
||||
for i, v := range rawValIds {
|
||||
index, err := strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
pubkey, err := bytesutil.FromHexString(v)
|
||||
if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, nil, false
|
||||
}
|
||||
var ok bool
|
||||
valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
|
||||
if !ok {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, nil, false
|
||||
}
|
||||
} else {
|
||||
if index >= uint64(st.NumValidators()) {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, nil, false
|
||||
}
|
||||
valIndices[i] = primitives.ValidatorIndex(index)
|
||||
}
|
||||
}
|
||||
if len(valIndices) == 0 {
|
||||
valIndices = make([]primitives.ValidatorIndex, len(allVals))
|
||||
for i := 0; i < len(allVals); i++ {
|
||||
valIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
valIndices, ok := requestedValIndices(w, r, st, allVals)
|
||||
if !ok {
|
||||
return nil, nil, nil, false
|
||||
}
|
||||
if len(valIndices) == len(allVals) {
|
||||
return bal, allVals, valIndices, true
|
||||
@@ -453,6 +522,120 @@ func totalAttRewards(
|
||||
return totalRewards, true
|
||||
}
|
||||
|
||||
func syncRewardsVals(
|
||||
w http.ResponseWriter,
|
||||
r *http.Request,
|
||||
st state.BeaconState,
|
||||
) ([]*precompute.Validator, []primitives.ValidatorIndex, bool) {
|
||||
allVals, _, err := altair.InitializePrecomputeValidators(r.Context(), st)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not initialize precompute validators: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, false
|
||||
}
|
||||
valIndices, ok := requestedValIndices(w, r, st, allVals)
|
||||
if !ok {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
sc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not get current sync committee: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, false
|
||||
}
|
||||
allScIndices := make([]primitives.ValidatorIndex, len(sc.Pubkeys))
|
||||
for i, pk := range sc.Pubkeys {
|
||||
valIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pk))
|
||||
if !ok {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("No validator index found for pubkey %#x", pk),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, nil, false
|
||||
}
|
||||
allScIndices[i] = valIdx
|
||||
}
|
||||
|
||||
scIndices := make([]primitives.ValidatorIndex, 0, len(allScIndices))
|
||||
scVals := make([]*precompute.Validator, 0, len(allScIndices))
|
||||
for _, valIdx := range valIndices {
|
||||
for _, scIdx := range allScIndices {
|
||||
if valIdx == scIdx {
|
||||
scVals = append(scVals, allVals[valIdx])
|
||||
scIndices = append(scIndices, valIdx)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return scVals, scIndices, true
|
||||
}
|
||||
|
||||
func requestedValIndices(w http.ResponseWriter, r *http.Request, st state.BeaconState, allVals []*precompute.Validator) ([]primitives.ValidatorIndex, bool) {
|
||||
var rawValIds []string
|
||||
if r.Body != http.NoBody {
|
||||
if err := json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode validators: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
valIndices := make([]primitives.ValidatorIndex, len(rawValIds))
|
||||
for i, v := range rawValIds {
|
||||
index, err := strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
pubkey, err := bytesutil.FromHexString(v)
|
||||
if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, false
|
||||
}
|
||||
var ok bool
|
||||
valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
|
||||
if !ok {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, false
|
||||
}
|
||||
} else {
|
||||
if index >= uint64(st.NumValidators()) {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return nil, false
|
||||
}
|
||||
valIndices[i] = primitives.ValidatorIndex(index)
|
||||
}
|
||||
}
|
||||
if len(valIndices) == 0 {
|
||||
valIndices = make([]primitives.ValidatorIndex, len(allVals))
|
||||
for i := 0; i < len(allVals); i++ {
|
||||
valIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
}
|
||||
|
||||
return valIndices, true
|
||||
}
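
requestedValIndices accepts the request body as a JSON array whose entries are either decimal validator indices or 0x-prefixed BLS pubkeys; an empty body selects every validator. A hedged client-side sketch of calling the sync committee rewards route registered later in this change (host, port, block ID, and the pubkey value are assumptions):

// Build a filter mixing an index and a pubkey, as the tests in this change do,
// and POST it to /eth/v1/beacon/rewards/sync_committee/{block_id}.
ids := []string{"20", "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}
payload, _ := json.Marshal(ids)
resp, err := http.Post("http://127.0.0.1:3500/eth/v1/beacon/rewards/sync_committee/32", "application/json", bytes.NewReader(payload))
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
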
|
||||
|
||||
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *network.DefaultErrorJson {
|
||||
if errors.Is(err, lookup.BlockIdParseError{}) {
|
||||
return &network.DefaultErrorJson{
|
||||
@@ -468,7 +651,7 @@ func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *n
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
return &network.DefaultErrorJson{
|
||||
Message: "Could not find requested block" + err.Error(),
|
||||
Message: "Could not find requested block: " + err.Error(),
|
||||
Code: http.StatusNotFound,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
@@ -34,6 +35,8 @@ import (
|
||||
)
|
||||
|
||||
func TestBlockRewards(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
valCount := 64
|
||||
|
||||
st, err := util.NewBeaconStateCapella()
|
||||
@@ -65,7 +68,7 @@ func TestBlockRewards(t *testing.T) {
|
||||
bRoots[0] = slot0bRoot
|
||||
require.NoError(t, st.SetBlockRoots(bRoots))
|
||||
|
||||
b := util.HydrateSignedBeaconBlockAltair(util.NewBeaconBlockAltair())
|
||||
b := util.HydrateSignedBeaconBlockCapella(util.NewBeaconBlockCapella())
|
||||
b.Block.Slot = 2
|
||||
// we have to set the proposer index to the value that will be randomly chosen (fortunately it's deterministic)
|
||||
b.Block.ProposerIndex = 12
|
||||
@@ -206,6 +209,7 @@ func TestAttestationRewards(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
helpers.ClearCache()
|
||||
|
||||
valCount := 64
|
||||
|
||||
@@ -479,3 +483,298 @@ func TestAttestationRewards(t *testing.T) {
|
||||
assert.Equal(t, "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncCommiteeRewards(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
helpers.ClearCache()
|
||||
|
||||
const valCount = 1024
|
||||
// we have to set the proposer index to the value that will be randomly chosen (fortunately it's deterministic)
|
||||
const proposerIndex = 84
|
||||
|
||||
st, err := util.NewBeaconStateCapella()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
|
||||
validators := make([]*eth.Validator, 0, valCount)
|
||||
secretKeys := make([]bls.SecretKey, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
secretKeys = append(secretKeys, blsKey)
|
||||
validators = append(validators, ð.Validator{
|
||||
PublicKey: blsKey.PublicKey().Marshal(),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
})
|
||||
}
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
|
||||
syncCommitteePubkeys := make([][]byte, fieldparams.SyncCommitteeLength)
|
||||
for i := 0; i < fieldparams.SyncCommitteeLength; i++ {
|
||||
syncCommitteePubkeys[i] = secretKeys[i].PublicKey().Marshal()
|
||||
}
|
||||
aggPubkey, err := bls.AggregatePublicKeys(syncCommitteePubkeys)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetCurrentSyncCommittee(ð.SyncCommittee{
|
||||
Pubkeys: syncCommitteePubkeys,
|
||||
AggregatePubkey: aggPubkey.Marshal(),
|
||||
}))
|
||||
|
||||
b := util.HydrateSignedBeaconBlockCapella(util.NewBeaconBlockCapella())
|
||||
b.Block.Slot = 32
|
||||
b.Block.ProposerIndex = proposerIndex
|
||||
scBits := bitfield.NewBitvector512()
|
||||
// last 10 sync committee members didn't perform their duty
|
||||
for i := uint64(0); i < fieldparams.SyncCommitteeLength-10; i++ {
|
||||
scBits.SetBitAt(i, true)
|
||||
}
|
||||
domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainSyncCommittee, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
sszBytes := primitives.SSZBytes("")
|
||||
r, err := signing.ComputeSigningRoot(&sszBytes, domain)
|
||||
require.NoError(t, err)
|
||||
// Bits set in sync committee bits determine which validators will be treated as participating in sync committee.
|
||||
// These validators have to sign the message.
|
||||
sigs := make([]bls.Signature, fieldparams.SyncCommitteeLength-10)
|
||||
for i := range sigs {
|
||||
sigs[i], err = blst.SignatureFromBytes(secretKeys[i].Sign(r[:]).Marshal())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
aggSig := bls.AggregateSignatures(sigs).Marshal()
|
||||
b.Block.Body.SyncAggregate = ð.SyncAggregate{SyncCommitteeBits: scBits, SyncCommitteeSignature: aggSig}
|
||||
sbb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
phase0block, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := params.BeaconConfig().SlotsPerEpoch
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: ¤tSlot}
|
||||
s := &Server{
|
||||
Blocker: &testutil.MockBlocker{SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
0: phase0block,
|
||||
32: sbb,
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
}
|
||||
|
||||
t.Run("ok - filtered vals", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"20", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
sum := uint64(0)
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.ParseUint(scReward.Reward, 10, 64)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, uint64(1396), sum)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("ok - all vals", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
request := httptest.NewRequest("POST", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 512, len(resp.Data))
|
||||
sum := 0
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.Atoi(scReward.Reward)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, 343416, sum)
|
||||
})
|
||||
t.Run("ok - validator outside sync committee is ignored", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"20", "999", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
sum := 0
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.Atoi(scReward.Reward)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, 1396, sum)
|
||||
})
|
||||
t.Run("ok - proposer reward is deducted", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
|
||||
valIds, err := json.Marshal([]string{"20", "84", pubkey})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SyncCommitteeRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncCommitteeRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
sum := 0
|
||||
for _, scReward := range resp.Data {
|
||||
r, err := strconv.Atoi(scReward.Reward)
|
||||
require.NoError(t, err)
|
||||
sum += r
|
||||
}
|
||||
assert.Equal(t, 2094, sum)
|
||||
})
|
||||
t.Run("invalid validator index/pubkey", func(t *testing.T) {
|
||||
balances := make([]uint64, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/32"
|
||||
var body bytes.Buffer
|
||||
valIds, err := json.Marshal([]string{"10", "foo"})
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(valIds)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "foo is not a validator index or pubkey", e.Message)
})
t.Run("unknown validator pubkey", func(t *testing.T) {
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
require.NoError(t, st.SetBalances(balances))

url := "http://only.the.slot.number.at.the.end.is.important/32"
var body bytes.Buffer
privkey, err := bls.RandKey()
require.NoError(t, err)
pubkey := fmt.Sprintf("%#x", privkey.PublicKey().Marshal())
valIds, err := json.Marshal([]string{"10", pubkey})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "No validator index found for pubkey "+pubkey, e.Message)
})
t.Run("validator index too large", func(t *testing.T) {
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
require.NoError(t, st.SetBalances(balances))

url := "http://only.the.slot.number.at.the.end.is.important/32"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"10", "9999"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Validator index 9999 is too large. Maximum allowed index is 1023", e.Message)
})
t.Run("phase 0", func(t *testing.T) {
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
require.NoError(t, st.SetBalances(balances))

url := "http://only.the.slot.number.at.the.end.is.important/0"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Sync committee rewards are not supported for Phase 0", e.Message)
})
}
@@ -11,8 +11,7 @@ type Server struct {
OptimisticModeFetcher blockchain.OptimisticModeFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ReplayerBuilder stategen.ReplayerBuilder
// TODO: Init
TimeFetcher blockchain.TimeFetcher
Stater lookup.Stater
HeadFetcher blockchain.HeadFetcher
TimeFetcher blockchain.TimeFetcher
Stater lookup.Stater
HeadFetcher blockchain.HeadFetcher
}
@@ -40,3 +40,14 @@ type TotalAttestationReward struct {
Source string `json:"source"`
InclusionDelay string `json:"inclusion_delay"`
}

type SyncCommitteeRewardsResponse struct {
Data []SyncCommitteeReward `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type SyncCommitteeReward struct {
ValidatorIndex string `json:"validator_index"`
Reward string `json:"reward"`
}
@@ -25,7 +25,7 @@ func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBea
log.WithError(err).Error("Could not get sync aggregate")
emptySig := [96]byte{0xC0}
emptyAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
SyncCommitteeSignature: emptySig[:],
}
if err := blk.SetSyncAggregate(emptyAggregate); err != nil {
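The size change above reflects that `SyncCommitteeBits` is a bitvector with one bit per sync-committee member, so its backing byte slice is the committee size divided by 8. A minimal sketch of that arithmetic (the 512-member committee size is an assumption used only for illustration):

```go
package main

import "fmt"

func main() {
	syncCommitteeSize := 512                  // assumed mainnet value, illustration only
	bits := make([]byte, syncCommitteeSize/8) // one bit per member -> 64 bytes, not 512
	fmt.Println(len(bits))                    // 64
}
```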
@@ -21,7 +21,7 @@ func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {

emptySig := [96]byte{0xC0}
want := &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize/8),
SyncCommitteeSignature: emptySig[:],
}
require.DeepEqual(t, want, agg)
@@ -6,6 +6,7 @@ import (
"context"
"fmt"
"net"
"net/http"
"sync"

"github.com/gorilla/mux"
@@ -218,15 +219,17 @@ func (s *Service) Start() {
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
}
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/attestations/{epoch}", rewardsServer.AttestationRewards).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/sync_committee/{block_id}", rewardsServer.SyncCommitteeRewards).Methods(http.MethodPost)

builderServer := &rpcBuilder.Server{
FinalizationFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
Stater: stater,
}
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals)
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals).Methods(http.MethodGet)

validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
@@ -320,9 +323,9 @@ func (s *Service) Start() {
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}

s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods("GET")
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods("POST")
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods("Delete")
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods(http.MethodDelete)

beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,
@@ -377,9 +380,9 @@ func (s *Service) Start() {
HeadFetcher: s.cfg.HeadFetcher,
SyncChecker: s.cfg.SyncService,
}
s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2)
s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2).Methods(http.MethodPost)
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
@@ -388,7 +391,6 @@ func (s *Service) Start() {
ethpbservice.RegisterEventsServer(s.grpcServer, &events.Server{
Ctx: s.ctx,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
HeadFetcher: s.cfg.HeadFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
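The hunk above pins every HTTP route to a verb via gorilla/mux's `.Methods(...)` using the `http.Method*` constants instead of raw strings (which also fixes the previously mis-cased "Delete"). A minimal standalone sketch of that pattern, with a hypothetical handler name used only for illustration:

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

// blockRewards is a hypothetical handler used only to illustrate method matching.
func blockRewards(w http.ResponseWriter, _ *http.Request) {
	_, _ = w.Write([]byte("ok"))
}

func main() {
	r := mux.NewRouter()
	// Only GET reaches the handler; a POST to the same path is rejected by the router.
	r.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", blockRewards).Methods(http.MethodGet)
	_ = http.ListenAndServe(":8080", r)
}
```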
@@ -34,6 +34,7 @@ go_library(
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -3,12 +3,15 @@ package stateutil
import (
"bytes"
"encoding/binary"
"runtime"
"sync"

"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)

const (
@@ -51,6 +54,20 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
return res, nil
}

func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int, groupSize int, wg *sync.WaitGroup) {
defer wg.Done()
for i := 0; i < groupSize; i++ {
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
logrus.WithError(err).Error("could not get validator field roots")
return
}
for k, root := range fRoots {
roots[(j*groupSize+i)*validatorFieldRoots+k] = root
}
}
}

// OptimizedValidatorRoots uses an optimized routine with gohashtree in order to
// derive a list of validator roots from a list of validator objects.
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
@@ -58,14 +75,25 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
if len(validators) == 0 {
return [][32]byte{}, nil
}
roots := make([][32]byte, 0, len(validators)*validatorFieldRoots)
for i := 0; i < len(validators); i++ {
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0)
rootsSize := len(validators) * validatorFieldRoots
groupSize := len(validators) / n
roots := make([][32]byte, rootsSize)
wg.Add(n - 1)
for j := 0; j < n-1; j++ {
go hashValidatorHelper(validators, roots, j, groupSize, &wg)
}
for i := (n - 1) * groupSize; i < len(validators); i++ {
fRoots, err := ValidatorFieldRoots(validators[i])
if err != nil {
return [][32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
}
roots = append(roots, fRoots...)
for k, root := range fRoots {
roots[i*validatorFieldRoots+k] = root
}
}
wg.Wait()

// A validator's tree can represented with a depth of 3. As log2(8) = 3
// Using this property we can lay out all the individual fields of a
@@ -73,9 +101,7 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
for i := 0; i < validatorTreeDepth; i++ {
// Overwrite input lists as we are hashing by level
// and only need the highest level to proceed.
outputLen := len(roots) / 2
htr.VectorizedSha256(roots, roots)
roots = roots[:outputLen]
roots = htr.VectorizedSha256(roots)
}
return roots, nil
}
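The parallel version splits the validator list across GOMAXPROCS workers: n-1 goroutines each hash a contiguous group of groupSize validators, and the calling goroutine sweeps the tail from (n-1)*groupSize to the end, which also absorbs any remainder from the integer division. A small self-contained sketch of that partitioning (the counts are made up for illustration):

```go
package main

import "fmt"

func main() {
	numValidators := 10
	n := 4 // stand-in for runtime.GOMAXPROCS(0)
	groupSize := numValidators / n
	// Workers 0..n-2 each cover one contiguous group.
	for j := 0; j < n-1; j++ {
		fmt.Printf("worker %d: validators [%d, %d)\n", j, j*groupSize, (j+1)*groupSize)
	}
	// The caller handles the tail, including the remainder validators.
	fmt.Printf("caller: validators [%d, %d)\n", (n-1)*groupSize, numValidators)
}
```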
@@ -3,11 +3,13 @@ package stateutil
import (
"reflect"
"strings"
"sync"
"testing"

mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestValidatorConstants(t *testing.T) {
@@ -30,3 +32,28 @@ func TestValidatorConstants(t *testing.T) {
_, err := ValidatorRegistryRoot([]*ethpb.Validator{v})
assert.NoError(t, err)
}

func TestHashValidatorHelper(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
v := &ethpb.Validator{}
valList := make([]*ethpb.Validator, 10*validatorFieldRoots)
for i := range valList {
valList[i] = v
}
roots := make([][32]byte, len(valList))
hashValidatorHelper(valList, roots, 2, 2, &wg)
for i := 0; i < 4*validatorFieldRoots; i++ {
require.Equal(t, [32]byte{}, roots[i])
}
emptyValRoots, err := ValidatorFieldRoots(v)
require.NoError(t, err)
for i := 4; i < 6; i++ {
for j := 0; j < validatorFieldRoots; j++ {
require.Equal(t, emptyValRoots[j], roots[i*validatorFieldRoots+j])
}
}
for i := 6 * validatorFieldRoots; i < 10*validatorFieldRoots; i++ {
require.Equal(t, [32]byte{}, roots[i])
}
}
@@ -48,8 +48,7 @@ func merkleizePubkey(pubkey []byte) ([32]byte, error) {
if err != nil {
return [32]byte{}, err
}
outputChunk := make([][32]byte, 1)
htr.VectorizedSha256(chunks, outputChunk)
outputChunk := htr.VectorizedSha256(chunks)

return outputChunk[0], nil
}

@@ -71,9 +71,7 @@ func ReturnTrieLayerVariable(elements [][32]byte, length uint64) [][]*[32]byte {
}

layers[i+1] = make([]*[32]byte, layerLen/2)
newElems := make([][32]byte, layerLen/2)
htr.VectorizedSha256(elements, newElems)
elements = newElems
elements = htr.VectorizedSha256(elements)
for j := range elements {
layers[i+1][j] = &elements[j]
}
@@ -295,9 +293,7 @@ func MerkleizeTrieLeaves(layers [][][32]byte, hashLayer [][32]byte) ([][][32]byt
if !math.IsPowerOf2(uint64(len(hashLayer))) {
return nil, nil, errors.Errorf("hash layer is a non power of 2: %d", len(hashLayer))
}
newLayer := make([][32]byte, len(hashLayer)/2)
htr.VectorizedSha256(hashLayer, newLayer)
hashLayer = newLayer
hashLayer = htr.VectorizedSha256(hashLayer)
layers[i] = hashLayer
i++
}
@@ -75,7 +75,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
}
if err := batch.error(); err != nil {
log.WithError(err).Info("error in BlocksByRange batch")
log.WithError(err).Debug("error in BlocksByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
cache/nonblocking/BUILD.bazel (new file, 17 lines, vendored)
@@ -0,0 +1,17 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"list.go",
"lru.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/cache/nonblocking",
visibility = ["//visibility:public"],
)

go_test(
name = "go_default_test",
srcs = ["lru_test.go"],
embed = [":go_default_library"],
)
cache/nonblocking/list.go (new file, 123 lines, vendored)
@@ -0,0 +1,123 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE_list file.
package nonblocking

// entry is an LRU entry
type entry[K comparable, V any] struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *entry[K, V]

// The list to which this element belongs.
list *lruList[K, V]

// The LRU key of this element.
key K

// The value stored with this element.
value V
}

// lruList represents a doubly linked list.
// The zero value for lruList is an empty list ready to use.
type lruList[K comparable, V any] struct {
root entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
len int // current list length excluding (this) sentinel element
}

// init initializes or clears list l.
func (l *lruList[K, V]) init() *lruList[K, V] {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}

// newList returns an initialized list.
func newList[K comparable, V any]() *lruList[K, V] { return new(lruList[K, V]).init() }

// length returns the number of elements of list l.
// The complexity is O(1).
func (l *lruList[K, V]) length() int { return l.len }

// back returns the last element of list l or nil if the list is empty.
func (l *lruList[K, V]) back() *entry[K, V] {
if l.len == 0 {
return nil
}
return l.root.prev
}

// lazyInit lazily initializes a zero List value.
func (l *lruList[K, V]) lazyInit() {
if l.root.next == nil {
l.init()
}
}

// insert inserts e after at, increments l.len, and returns e.
func (l *lruList[K, V]) insert(e, at *entry[K, V]) *entry[K, V] {
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
e.list = l
l.len++
return e
}

// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *lruList[K, V]) insertValue(k K, v V, at *entry[K, V]) *entry[K, V] {
return l.insert(&entry[K, V]{value: v, key: k}, at)
}

// remove removes e from its list, decrements l.len
func (l *lruList[K, V]) remove(e *entry[K, V]) V {
// If already removed, do nothing.
if e.prev == nil && e.next == nil {
return e.value
}
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--

return e.value
}

// move moves e to next to at.
func (*lruList[K, V]) move(e, at *entry[K, V]) {
if e == at {
return
}
e.prev.next = e.next
e.next.prev = e.prev

e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
}

// pushFront inserts a new element e with value v at the front of list l and returns e.
func (l *lruList[K, V]) pushFront(k K, v V) *entry[K, V] {
l.lazyInit()
return l.insertValue(k, v, &l.root)
}

// moveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *lruList[K, V]) moveToFront(e *entry[K, V]) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, &l.root)
}
cache/nonblocking/lru.go (new file, 135 lines, vendored)
@@ -0,0 +1,135 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package nonblocking

import (
"errors"
"sync"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback[K comparable, V any] func(key K, value V)

// LRU implements a non-thread safe fixed size LRU cache
type LRU[K comparable, V any] struct {
itemsLock sync.RWMutex
evictListLock sync.RWMutex
size int
evictList *lruList[K, V]
items map[K]*entry[K, V]
onEvict EvictCallback[K, V]
}

// NewLRU constructs an LRU of the given size
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) {
if size <= 0 {
return nil, errors.New("must provide a positive size")
}

c := &LRU[K, V]{
size: size,
evictList: newList[K, V](),
items: make(map[K]*entry[K, V]),
onEvict: onEvict,
}
return c, nil
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
// Check for existing item
c.itemsLock.RLock()
if ent, ok := c.items[key]; ok {
c.itemsLock.RUnlock()

c.evictListLock.Lock()
c.evictList.moveToFront(ent)
c.evictListLock.Unlock()
ent.value = value
return false
}
c.itemsLock.RUnlock()

// Add new item
c.evictListLock.Lock()
ent := c.evictList.pushFront(key, value)
c.evictListLock.Unlock()

c.itemsLock.Lock()
c.items[key] = ent
c.itemsLock.Unlock()

c.evictListLock.RLock()
evict := c.evictList.length() > c.size
c.evictListLock.RUnlock()

// Verify size not exceeded
if evict {
c.removeOldest()
}
return evict
}

// Get looks up a key's value from the cache.
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
c.itemsLock.RLock()
if ent, ok := c.items[key]; ok {
c.itemsLock.RUnlock()

// Make this get function non-blocking for multiple readers.
go func() {
c.evictListLock.Lock()
c.evictList.moveToFront(ent)
c.evictListLock.Unlock()
}()

return ent.value, true
}
c.itemsLock.RUnlock()
return
}

// Len returns the number of items in the cache.
func (c *LRU[K, V]) Len() int {
c.evictListLock.RLock()
defer c.evictListLock.RUnlock()
return c.evictList.length()
}

// Resize changes the cache size.
func (c *LRU[K, V]) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}

// removeOldest removes the oldest item from the cache.
func (c *LRU[K, V]) removeOldest() {
c.evictListLock.RLock()
if ent := c.evictList.back(); ent != nil {
c.evictListLock.RUnlock()
c.removeElement(ent)
return
}
c.evictListLock.RUnlock()
}

// removeElement is used to remove a given list element from the cache
func (c *LRU[K, V]) removeElement(e *entry[K, V]) {
c.evictListLock.Lock()
c.evictList.remove(e)
c.evictListLock.Unlock()

c.itemsLock.Lock()
delete(c.items, e.key)
c.itemsLock.Unlock()
if c.onEvict != nil {
c.onEvict(e.key, e.value)
}
}
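A minimal usage sketch of the cache added above, assuming it is imported as the `nonblocking` package declared in the BUILD file; note that Get updates recency in a background goroutine, which is what keeps concurrent readers from blocking each other:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/cache/nonblocking"
)

func main() {
	// Capacity of 2; the callback fires whenever an entry is evicted.
	cache, err := nonblocking.NewLRU[string, int](2, func(k string, v int) {
		fmt.Println("evicted", k, v)
	})
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used entry
	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v)
	}
}
```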
cache/nonblocking/lru_test.go (new file, 115 lines, vendored)
@@ -0,0 +1,115 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package nonblocking

import (
"context"
"testing"
"time"
)

func TestLRU_Concurrency(t *testing.T) {
onEvicted := func(_ int, _ int) {}
size := 20
cache, err := NewLRU(size, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
for i := 0; i < 100; i++ {
go func(j int) {
for {
if ctx.Err() != nil {
return
}
cache.Add(j, j)
cache.Get(j)
time.Sleep(time.Millisecond * 50)
}
}(i)
}
<-ctx.Done()
}

func TestLRU_Eviction(t *testing.T) {
evictCounter := 0
onEvicted := func(_ int, _ int) {
evictCounter++
}
size := 20
cache, err := NewLRU(size, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 20; i++ {
cache.Add(i, i)
cache.Get(i)
}
cache.Add(20, 20)
if evictCounter != 1 {
t.Fatalf("should have evicted 1 element: %d", evictCounter)
}
}

// Test that Add returns true/false if an eviction occurred
func TestLRU_Add(t *testing.T) {
evictCounter := 0
onEvicted := func(_ int, _ int) {
evictCounter++
}
l, err := NewLRU(1, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

if l.Add(1, 1) == true || evictCounter != 0 {
t.Errorf("should not have an eviction")
}
if l.Add(2, 2) == false || evictCounter != 1 {
t.Errorf("should have an eviction")
}
}

// Test that Resize can upsize and downsize
func TestLRU_Resize(t *testing.T) {
onEvictCounter := 0
onEvicted := func(k int, v int) {
onEvictCounter++
}
l, err := NewLRU(2, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

// Downsize
l.Add(1, 1)
l.Add(2, 2)
evicted := l.Resize(1)
if evicted != 1 {
t.Errorf("1 element should have been evicted: %v", evicted)
}
if onEvictCounter != 1 {
t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
}

l.Add(3, 3)
if _, ok := l.Get(1); ok {
t.Errorf("Element 1 should have been evicted")
}

// Upsize
evicted = l.Resize(2)
if evicted != 0 {
t.Errorf("0 elements should have been evicted: %v", evicted)
}

l.Add(4, 4)
if _, ok := l.Get(3); !ok {
t.Errorf("Cache should have contained 2 elements")
}
if _, ok := l.Get(4); !ok {
t.Errorf("Cache should have contained 2 elements")
}
}
@@ -73,6 +73,7 @@ var appFlags = []cli.Flag{
flags.MaxBuilderEpochMissedSlots,
flags.MaxBuilderConsecutiveMissedSlots,
flags.EngineEndpointTimeoutSeconds,
flags.LocalBlockValueBoost,
cmd.BackupWebhookOutputDir,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,

@@ -125,6 +125,7 @@ var appHelpFlagGroups = []flagGroup{
flags.MaxBuilderConsecutiveMissedSlots,
flags.EngineEndpointTimeoutSeconds,
flags.SlasherDirFlag,
flags.LocalBlockValueBoost,
checkpoint.BlockPath,
checkpoint.StatePath,
checkpoint.RemoteURL,
@@ -17,7 +17,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/crypto/bls/blst",
visibility = ["//visibility:public"],
deps = [
"//cache/lru:go_default_library",
"//cache/nonblocking:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//crypto/bls/common:go_default_library",
@@ -3,8 +3,11 @@
package blst

import (
"fmt"
"runtime"

"github.com/prysmaticlabs/prysm/v4/cache/nonblocking"
"github.com/prysmaticlabs/prysm/v4/crypto/bls/common"
blst "github.com/supranational/blst/bindings/go"
)

@@ -15,4 +18,10 @@ func init() {
maxProcs = 1
}
blst.SetMaxProcs(maxProcs)
onEvict := func(_ [48]byte, _ common.PublicKey) {}
keysCache, err := nonblocking.NewLRU(maxKeys, onEvict)
if err != nil {
panic(fmt.Sprintf("Could not initiate public keys cache: %v", err))
}
pubkeyCache = keysCache
}
@@ -6,14 +6,14 @@ import (
"fmt"

"github.com/pkg/errors"
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
"github.com/prysmaticlabs/prysm/v4/cache/nonblocking"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/crypto/bls/common"
)

var maxKeys = 1_000_000
var pubkeyCache = lruwrpr.New(maxKeys)
var pubkeyCache *nonblocking.LRU[[48]byte, common.PublicKey]

// PublicKey used in the BLS signature scheme.
type PublicKey struct {
@@ -27,7 +27,7 @@ func PublicKeyFromBytes(pubKey []byte) (common.PublicKey, error) {
}
newKey := (*[fieldparams.BLSPubkeyLength]byte)(pubKey)
if cv, ok := pubkeyCache.Get(*newKey); ok {
return cv.(*PublicKey).Copy(), nil
return cv.Copy(), nil
}
// Subgroup check NOT done when decompressing pubkey.
p := new(blstPublicKey).Uncompress(pubKey)
@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
@@ -7,3 +7,11 @@ go_library(
visibility = ["//visibility:public"],
deps = ["@com_github_prysmaticlabs_gohashtree//:go_default_library"],
)

go_test(
name = "go_default_test",
size = "small",
srcs = ["hashtree_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)
@@ -1,17 +1,47 @@
package htr

import (
"runtime"
"sync"

"github.com/prysmaticlabs/gohashtree"
)

const minSliceSizeToParallelize = 5000

func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGroup) {
defer wg.Done()
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err)
}
}

// VectorizedSha256 takes a list of roots and hashes them using CPU
// specific vector instructions. Depending on host machine's specific
// hardware configuration, using this routine can lead to a significant
// performance improvement compared to the default method of hashing
// lists.
func VectorizedSha256(inputList [][32]byte, outputList [][32]byte) {
err := gohashtree.Hash(outputList, inputList)
func VectorizedSha256(inputList [][32]byte) [][32]byte {
outputList := make([][32]byte, len(inputList)/2)
if len(inputList) < minSliceSizeToParallelize {
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err)
}
return outputList
}
n := runtime.GOMAXPROCS(0) - 1
wg := sync.WaitGroup{}
wg.Add(n)
groupSize := len(inputList) / (2 * (n + 1))
for j := 0; j < n; j++ {
go hashParallel(inputList[j*2*groupSize:(j+1)*2*groupSize], outputList[j*groupSize:], &wg)
}
err := gohashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
if err != nil {
panic(err)
}
wg.Wait()
return outputList
}
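With the new signature, callers pass only the input layer and receive a freshly allocated output layer of half the length, since each output root is the hash of two adjacent inputs. A short usage sketch based on the function shown above:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/crypto/hash/htr"
)

func main() {
	// Input length must be even: each output root covers a pair of inputs.
	leaves := make([][32]byte, 8)
	parents := htr.VectorizedSha256(leaves) // len(parents) == 4
	fmt.Println(len(parents))
}
```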
crypto/hash/htr/hashtree_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package htr

import (
"sync"
"testing"

"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func Test_VectorizedSha256(t *testing.T) {
largeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
secondLargeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
hash1 := make([][32]byte, 16*minSliceSizeToParallelize)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
tempHash := VectorizedSha256(largeSlice)
copy(hash1, tempHash)
}()
wg.Wait()
hash2 := VectorizedSha256(secondLargeSlice)
require.Equal(t, len(hash1), len(hash2))
for i, r := range hash1 {
require.Equal(t, r, hash2[i])
}
}
@@ -213,9 +213,7 @@ func MerkleizeVector(elements [][32]byte, length uint64) [32]byte {
zerohash := trie.ZeroHashes[i]
elements = append(elements, zerohash)
}
outputLen := len(elements) / 2
htr.VectorizedSha256(elements, elements)
elements = elements[:outputLen]
elements = htr.VectorizedSha256(elements)
}
return elements[0]
}
@@ -6,6 +6,7 @@ go_library(
"auth.go",
"endpoint.go",
"external_ip.go",
"reader.go",
"writer.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/network",
@@ -25,6 +26,7 @@ go_test(
"auth_test.go",
"endpoint_test.go",
"external_ip_test.go",
"reader_test.go",
],
embed = [":go_default_library"],
deps = [
network/reader.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package network

import (
"net/http"
"regexp"
"strconv"
"strings"
)

// match a number with optional decimals
var priorityRegex = regexp.MustCompile(`q=(\d+(?:\.\d+)?)`)

// SszRequested takes a http request and checks to see if it should be requesting a ssz response.
func SszRequested(req *http.Request) (bool, error) {
accept := req.Header.Values("Accept")
if len(accept) == 0 {
return false, nil
}
types := strings.Split(accept[0], ",")
currentType, currentPriority := "", 0.0
for _, t := range types {
values := strings.Split(t, ";")
name := values[0]
if name != jsonMediaType && name != octetStreamMediaType {
continue
}
// no params specified
if len(values) == 1 {
priority := 1.0
if priority > currentPriority {
currentType, currentPriority = name, priority
}
continue
}
params := values[1]

match := priorityRegex.FindAllStringSubmatch(params, 1)
if len(match) != 1 {
continue
}
priority, err := strconv.ParseFloat(match[0][1], 32)
if err != nil {
return false, err
}
if priority > currentPriority {
currentType, currentPriority = name, priority
}
}

return currentType == octetStreamMediaType, nil
}
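A short sketch of how SszRequested reads the Accept header's quality values; the test file that follows exercises the same cases, and the URL here is a placeholder:

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/prysmaticlabs/prysm/v4/network"
)

func main() {
	req := httptest.NewRequest("GET", "http://example.test/some/endpoint", nil)
	// JSON is offered at a lower quality value than octet-stream, so SSZ wins.
	req.Header.Set("Accept", "application/json;q=0.9,application/octet-stream")
	ssz, err := network.SszRequested(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(ssz) // true
}
```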
network/reader_test.go (new file, 99 lines)
@@ -0,0 +1,99 @@
package network

import (
"fmt"
"net/http/httptest"
"testing"

"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestSSZRequested(t *testing.T) {
t.Run("ssz_requested", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{octetStreamMediaType}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})

t.Run("ssz_content_type_first", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", octetStreamMediaType, jsonMediaType)}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})

t.Run("ssz_content_type_preferred_1", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", jsonMediaType, octetStreamMediaType)}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})

t.Run("ssz_content_type_preferred_2", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", octetStreamMediaType, jsonMediaType)}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})

t.Run("other_content_type_preferred", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", jsonMediaType, octetStreamMediaType)}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})

t.Run("other_params", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", jsonMediaType, octetStreamMediaType)}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})

t.Run("no_header", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})

t.Run("empty_header", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})

t.Run("empty_header_value", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{""}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})

t.Run("other_content_type", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{"application/other"}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})

t.Run("garbage", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{"This is Sparta!!!"}
result, err := SszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
}
@@ -10,6 +10,11 @@ import (
log "github.com/sirupsen/logrus"
)

const (
jsonMediaType = "application/json"
octetStreamMediaType = "application/octet-stream"
)

// DefaultErrorJson is a JSON representation of a simple error value, containing only a message and an error code.
type DefaultErrorJson struct {
Message string `json:"message"`
@@ -18,13 +23,23 @@ type DefaultErrorJson struct {

// WriteJson writes the response message in JSON format.
func WriteJson(w http.ResponseWriter, v any) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Type", jsonMediaType)
w.WriteHeader(http.StatusOK)
if err := json.NewEncoder(w).Encode(v); err != nil {
log.WithError(err).Error("Could not write response message")
}
}

// WriteSsz writes the response message in ssz format
func WriteSsz(w http.ResponseWriter, respSsz []byte, fileName string) {
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", octetStreamMediaType)
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
log.WithError(err).Error("could not write response message")
}
}

// WriteError writes the error by manipulating headers and the body of the final response.
func WriteError(w http.ResponseWriter, errJson *DefaultErrorJson) {
j, err := json.Marshal(errJson)
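A hedged sketch of how a handler could combine SszRequested with the writers above to negotiate the response encoding; the route, payload bytes, and file name are placeholders, not part of this change:

```go
package main

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/network"
)

func stateHandler(w http.ResponseWriter, r *http.Request) {
	ssz, err := network.SszRequested(r)
	if err != nil {
		network.WriteError(w, &network.DefaultErrorJson{Message: err.Error(), Code: http.StatusBadRequest})
		return
	}
	if ssz {
		// Placeholder bytes stand in for the SSZ-encoded object.
		network.WriteSsz(w, []byte{0x01, 0x02}, "state.ssz")
		return
	}
	network.WriteJson(w, map[string]string{"version": "capella"})
}

func main() {
	http.HandleFunc("/example/state", stateHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```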
@@ -678,5 +678,6 @@ func (b *ExecutionPayloadBodyV1) UnmarshalJSON(enc []byte) error {
transactions[i] = tx
}
b.Transactions = transactions
b.Withdrawals = decoded.Withdrawals
return nil
}
@@ -487,6 +487,32 @@ func TestPayloadIDBytes_MarshalUnmarshalJSON(t *testing.T) {
require.Equal(t, true, item == *res)
}

func TestExecutionPayloadBody_MarshalUnmarshalJSON(t *testing.T) {
pBody := &enginev1.ExecutionPayloadBodyV1{
Transactions: [][]byte{[]byte("random1"), []byte("random2"), []byte("random3")},
Withdrawals: []*enginev1.Withdrawal{
&enginev1.Withdrawal{
Index: 200,
ValidatorIndex: 20303,
Amount: 3200000000,
Address: bytesutil.PadTo([]byte("junk"), 20),
},
&enginev1.Withdrawal{
Index: 200,
ValidatorIndex: 70303,
Amount: 3200000800,
Address: bytesutil.PadTo([]byte("junk2"), 20),
},
},
}
enc, err := json.Marshal(pBody)
require.NoError(t, err)
res := &enginev1.ExecutionPayloadBodyV1{}
err = res.UnmarshalJSON(enc)
require.NoError(t, err)
require.DeepEqual(t, pBody, res)
}

func TestExecutionBlock_MarshalUnmarshalJSON_MainnetBlock(t *testing.T) {
newBlock := &enginev1.ExecutionBlock{}
require.NoError(t, newBlock.UnmarshalJSON([]byte(blockJson)))
@@ -851,6 +851,10 @@ func BeaconStateCapellaToProto(st state.BeaconState) (*ethpbv2.BeaconStateCapell
StateSummaryRoot: summary.StateSummaryRoot,
}
}
hRoots, err := st.HistoricalRoots()
if err != nil {
return nil, errors.Wrap(err, "could not get historical roots")
}

result := &ethpbv2.BeaconStateCapella{
GenesisTime: st.GenesisTime(),
@@ -925,6 +929,7 @@ func BeaconStateCapellaToProto(st state.BeaconState) (*ethpbv2.BeaconStateCapell
NextWithdrawalIndex: sourceNextWithdrawalIndex,
NextWithdrawalValidatorIndex: sourceNextWithdrawalValIndex,
HistoricalSummaries: sourceHistoricalSummaries,
HistoricalRoots: hRoots,
}

return result, nil
@@ -727,6 +727,8 @@ func TestBeaconStateCapellaToProto(t *testing.T) {
assert.DeepEqual(t, bytesutil.PadTo([]byte("blockroots"), 32), result.BlockRoots[0])
assert.Equal(t, 8192, len(result.StateRoots))
assert.DeepEqual(t, bytesutil.PadTo([]byte("stateroots"), 32), result.StateRoots[0])
assert.Equal(t, 1, len(result.HistoricalRoots))
assert.DeepEqual(t, bytesutil.PadTo([]byte("historicalroots"), 32), result.HistoricalRoots[0])
resultEth1Data := result.Eth1Data
require.NotNil(t, resultEth1Data)
assert.DeepEqual(t, bytesutil.PadTo([]byte("e1ddepositroot"), 32), resultEth1Data.DepositRoot)
@@ -67,6 +67,9 @@ func builderActive(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) err
if string(execPayload.ExtraData()) != "prysm-builder" {
return errors.Errorf("block with slot %d was not built by the builder. It has an extra data of %s", b.Block().Slot(), string(execPayload.ExtraData()))
}
if execPayload.GasLimit() == 0 {
return errors.Errorf("block with slot %d has a gas limit of 0, when it should be in the 30M range", b.Block().Slot())
}
}
if lowestBound == currEpoch {
return nil
@@ -7,10 +7,12 @@ go_test(
"attestation_test.go",
"attester_slashing_test.go",
"block_header_test.go",
"bls_to_execution_change_test.go",
"deposit_test.go",
"proposer_slashing_test.go",
"sync_committee_test.go",
"voluntary_exit_test.go",
"withdrawals_test.go",
],
data = [
"@consensus_spec_tests_mainnet//:test_data",
@@ -0,0 +1,11 @@
package operations

import (
"testing"

"github.com/prysmaticlabs/prysm/v4/testing/spectest/shared/capella/operations"
)

func TestMainnet_Capella_Operations_BLSToExecutionChange(t *testing.T) {
operations.RunBLSToExecutionChangeTest(t, "mainnet")
}
@@ -0,0 +1,11 @@
package operations

import (
"testing"

"github.com/prysmaticlabs/prysm/v4/testing/spectest/shared/capella/operations"
)

func TestMainnet_Capella_Operations_Withdrawals(t *testing.T) {
operations.RunWithdrawalsTest(t, "mainnet")
}
@@ -6,6 +6,7 @@ import (
"fmt"
"os"
"regexp"
"runtime"
"strings"
"time"

@@ -269,20 +270,33 @@ func prettyPrint(sszPath string, data fssz.Unmarshaler) {
func benchmarkHash(sszPath string, sszType string) {
switch sszType {
case "state_capella":
data := &ethpb.BeaconStateCapella{}
if err := dataFetcher(sszPath, data); err != nil {
st := &ethpb.BeaconStateCapella{}
rawFile, err := os.ReadFile(sszPath) // #nosec G304
if err != nil {
log.Fatal(err)
}
st, err := state_native.InitializeFromProtoCapella(data)

startDeserialize := time.Now()
if err := st.UnmarshalSSZ(rawFile); err != nil {
log.Fatal(err)
}
deserializeDuration := time.Since(startDeserialize)

stateTrieState, err := state_native.InitializeFromProtoCapella(st)
if err != nil {
log.Fatal("not a state")
log.Fatal(err)
}
start := time.Now()
root, err := st.HashTreeRoot(context.Background())
stat := &runtime.MemStats{}
runtime.ReadMemStats(stat)
root, err := stateTrieState.HashTreeRoot(context.Background())
if err != nil {
log.Fatal("couldn't hash")
}
fmt.Printf("Duration: %v HTR: %#x\n", time.Since(start), root)
newStat := &runtime.MemStats{}
runtime.ReadMemStats(newStat)
fmt.Printf("Deserialize Duration: %v, Hashing Duration: %v HTR: %#x\n", deserializeDuration, time.Since(start), root)
fmt.Printf("Total Memory Allocation Differential: %d bytes, Heap Memory Allocation Differential: %d bytes\n", int64(newStat.TotalAlloc)-int64(stat.TotalAlloc), int64(newStat.HeapAlloc)-int64(stat.HeapAlloc))
return
default:
log.Fatal("Invalid type")
@@ -42,6 +42,7 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -106,6 +107,7 @@ go_test(
"//api/gateway/apimiddleware:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -1,7 +1,9 @@
package beacon_api

import (
"bytes"
"context"
"encoding/json"
"net/http"
"reflect"
"strconv"
@@ -11,6 +13,7 @@ import (
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -23,6 +26,8 @@ type beaconApiBeaconChainClient struct {
stateValidatorsProvider stateValidatorsProvider
}

const getValidatorPerformanceEndpoint = "/prysm/validators/performance"

func (c beaconApiBeaconChainClient) getHeadBlockHeaders(ctx context.Context) (*apimiddleware.BlockHeaderResponseJson, error) {
blockHeader := apimiddleware.BlockHeaderResponseJson{}
if _, err := c.jsonRestHandler.GetRestJsonResponse(ctx, "/eth/v1/beacon/headers/head", &blockHeader); err != nil {
@@ -317,12 +322,35 @@ func (c beaconApiBeaconChainClient) GetValidatorQueue(ctx context.Context, in *e
}

func (c beaconApiBeaconChainClient) GetValidatorPerformance(ctx context.Context, in *ethpb.ValidatorPerformanceRequest) (*ethpb.ValidatorPerformanceResponse, error) {
if c.fallbackClient != nil {
return c.fallbackClient.GetValidatorPerformance(ctx, in)
request, err := json.Marshal(validator.ValidatorPerformanceRequest{
PublicKeys: in.PublicKeys,
Indices: in.Indices,
})
if err != nil {
return nil, errors.Wrap(err, "failed to marshal request")
}
resp := &validator.ValidatorPerformanceResponse{}
if _, err := c.jsonRestHandler.PostRestJson(
ctx,
getValidatorPerformanceEndpoint,
nil,
bytes.NewBuffer(request),
resp,
); err != nil {
return nil, errors.Wrap(err, "failed to get validator performance")
}

// TODO: Implement me
panic("beaconApiBeaconChainClient.GetValidatorPerformance is not implemented. To use a fallback client, pass a fallback client as the last argument of NewBeaconApiBeaconChainClientWithFallback.")
return &ethpb.ValidatorPerformanceResponse{
CurrentEffectiveBalances: resp.CurrentEffectiveBalances,
CorrectlyVotedSource: resp.CorrectlyVotedSource,
CorrectlyVotedTarget: resp.CorrectlyVotedTarget,
CorrectlyVotedHead: resp.CorrectlyVotedHead,
BalancesBeforeEpochTransition: resp.BalancesBeforeEpochTransition,
BalancesAfterEpochTransition: resp.BalancesAfterEpochTransition,
MissingValidators: resp.MissingValidators,
PublicKeys: resp.PublicKeys,
InactivityScores: resp.InactivityScores,
}, nil
}

func (c beaconApiBeaconChainClient) GetValidatorParticipation(ctx context.Context, in *ethpb.GetValidatorParticipationRequest) (*ethpb.ValidatorParticipationResponse, error) {
@@ -1,7 +1,9 @@
package beacon_api

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math"
@@ -10,8 +12,11 @@ import (

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/mock/gomock"
gatewaymiddleware "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -925,3 +930,44 @@ func TestGetChainHead(t *testing.T) {
assert.DeepEqual(t, expectedChainHead, chainHead)
})
}

func Test_beaconApiBeaconChainClient_GetValidatorPerformance(t *testing.T) {
publicKeys := [][48]byte{
bytesutil.ToBytes48([]byte{1}),
bytesutil.ToBytes48([]byte{2}),
bytesutil.ToBytes48([]byte{3}),
}

ctx := context.Background()
ctrl := gomock.NewController(t)
defer ctrl.Finish()

request, err := json.Marshal(validator.ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
})
require.NoError(t, err)

wantResponse := &validator.ValidatorPerformanceResponse{}
want := &ethpb.ValidatorPerformanceResponse{}
jsonRestHandler := mock.NewMockjsonRestHandler(ctrl)
jsonRestHandler.EXPECT().PostRestJson(
ctx,
getValidatorPerformanceEndpoint,
nil,
bytes.NewBuffer(request),
wantResponse,
).Return(
&gatewaymiddleware.DefaultErrorJson{},
nil,
)

c := beaconApiBeaconChainClient{
jsonRestHandler: jsonRestHandler,
}

got, err := c.GetValidatorPerformance(ctx, &ethpb.ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
})
require.NoError(t, err)
require.DeepEqual(t, want.PublicKeys, got.PublicKeys)
}
@@ -559,11 +559,13 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
}

if builderConfigFromFlag != nil {
config := builderConfigFromFlag
config := builderConfigFromFlag.Clone()
if config.GasLimit == validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit) && vpSettings.DefaultConfig.BuilderConfig != nil {
config.GasLimit = vpSettings.DefaultConfig.BuilderConfig.GasLimit
}
vpSettings.DefaultConfig.BuilderConfig = config
} else if vpSettings.DefaultConfig.BuilderConfig != nil {
vpSettings.DefaultConfig.BuilderConfig.GasLimit = reviewGasLimit(vpSettings.DefaultConfig.BuilderConfig.GasLimit)
}

if psExists {
@@ -573,7 +575,7 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
}
}

if fileConfig.ProposerConfig != nil {
if fileConfig.ProposerConfig != nil && len(fileConfig.ProposerConfig) != 0 {
vpSettings.ProposeConfig = make(map[[fieldparams.BLSPubkeyLength]byte]*validatorServiceConfig.ProposerOption)
for key, option := range fileConfig.ProposerConfig {
decodedKey, err := hexutil.Decode(key)
@@ -583,29 +585,24 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
if len(decodedKey) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("%v is not a bls public key", key)
}
if option == nil {
return nil, fmt.Errorf("fee recipient is required for proposer %s", key)
}
if !common.IsHexAddress(option.FeeRecipient) {
return nil, errors.New("fee recipient is not a valid eth1 address")
}
if err := warnNonChecksummedAddress(option.FeeRecipient); err != nil {
if err := verifyOption(key, option); err != nil {
return nil, err
}
currentBuilderConfig := validatorServiceConfig.ToBuilderConfig(option.Builder)
if builderConfigFromFlag != nil {
config := builderConfigFromFlag.ToPayload()
if config.GasLimit == validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit) && option.Builder != nil {
config.GasLimit = option.Builder.GasLimit
config := builderConfigFromFlag.Clone()
if config.GasLimit == validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit) && currentBuilderConfig != nil {
config.GasLimit = currentBuilderConfig.GasLimit
}
option.Builder = config
} else if option.Builder != nil {
option.Builder.GasLimit = reviewGasLimit(option.Builder.GasLimit)
currentBuilderConfig = config
} else if currentBuilderConfig != nil {
currentBuilderConfig.GasLimit = reviewGasLimit(currentBuilderConfig.GasLimit)
}
o := &validatorServiceConfig.ProposerOption{
FeeRecipientConfig: &validatorServiceConfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress(option.FeeRecipient),
},
BuilderConfig: validatorServiceConfig.ToBuilderConfig(option.Builder),
BuilderConfig: currentBuilderConfig,
}
pubkeyB := bytesutil.ToBytes48(decodedKey)
vpSettings.ProposeConfig[pubkeyB] = o
@@ -626,6 +623,19 @@ func proposerSettings(cliCtx *cli.Context, db iface.ValidatorDB) (*validatorServ
return vpSettings, nil
}

func verifyOption(key string, option *validatorpb.ProposerOptionPayload) error {
if option == nil {
return fmt.Errorf("fee recipient is required for proposer %s", key)
}
if !common.IsHexAddress(option.FeeRecipient) {
return errors.New("fee recipient is not a valid eth1 address")
}
if err := warnNonChecksummedAddress(option.FeeRecipient); err != nil {
return err
}
return nil
}

func handleNoProposerSettingsFlagsProvided(cliCtx *cli.Context,
db iface.ValidatorDB,
builderConfigFromFlag *validatorServiceConfig.BuilderConfig) (*validatorServiceConfig.ProposerSettings, error) {
@@ -231,6 +231,29 @@ func TestProposerSettings(t *testing.T) {
withdb func(db iface.ValidatorDB) error
validatorRegistrationEnabled bool
}{
{
name: "Happy Path default only proposer settings file with builder settings,",
args: args{
proposerSettingsFlagValues: &proposerSettingsFlag{
dir: "./testdata/default-only-proposer-config.json",
url: "",
defaultfee: "",
},
},
want: func() *validatorserviceconfig.ProposerSettings {
return &validatorserviceconfig.ProposerSettings{
DefaultConfig: &validatorserviceconfig.ProposerOption{
FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
FeeRecipient: common.HexToAddress("0xae967917c465db8578ca9024c205720b1a3651A9"),
},
BuilderConfig: &validatorserviceconfig.BuilderConfig{
Enabled: true,
GasLimit: validator.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
},
},
}
},
},
{
name: "Happy Path Config file File, bad checksum",
args: args{
validator/node/testdata/default-only-proposer-config.json (new file, 7 lines, vendored)
@@ -0,0 +1,7 @@
{
"proposer_config": {},
"default_config": {
"fee_recipient": "0xAe967917c465db8578ca9024c205720b1a3651A9",
"builder": {"enabled": true}
}
}