mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 14:28:09 -05:00

Compare commits: state-diff...rose-devel (23 commits)

Commits (SHA1):
5dcbb5294a, 6023a0d45f, a8a2866798, 011150e93e, 3bd52c706a, f58019ba17,
230d2af015, 3f6c6e935f, 1d50bc0ebf, 281bdd84b4, c0368681d3, 7d6afc3412,
a168bc256d, c8187616d8, 4bc6df1f50, 643962029a, a95f7b4867, 574108644c,
670b2b8291, 3198d64a7c, 26645ed724, ee3541534c, 3e429b7d55
@@ -8,7 +8,11 @@ go_library(
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
    visibility = ["//visibility:public"],
    deps = ["@com_github_rs_cors//:go_default_library"],
    deps = [
        "//api:go_default_library",
        "@com_github_rs_cors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
@@ -22,5 +26,6 @@ go_test(
        "//api:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
@@ -1,11 +1,14 @@
package middleware

import (
	"compress/gzip"
	"fmt"
	"net/http"
	"strings"

	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/rs/cors"
	log "github.com/sirupsen/logrus"
)

type Middleware func(http.Handler) http.Handler
@@ -112,6 +115,46 @@ func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
	}
}

// AcceptEncodingHeaderHandler compresses the response before sending it back to the client, if gzip is supported.
func AcceptEncodingHeaderHandler() Middleware {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
				next.ServeHTTP(w, r)
				return
			}

			gz := gzip.NewWriter(w)
			gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
			defer func() {
				if !gzipRW.zipped {
					return
				}
				if err := gz.Close(); err != nil {
					log.WithError(err).Error("Failed to close gzip writer")
				}
			}()

			next.ServeHTTP(gzipRW, r)
		})
	}
}

type gzipResponseWriter struct {
	gz *gzip.Writer
	http.ResponseWriter
	zipped bool
}

func (g *gzipResponseWriter) Write(b []byte) (int, error) {
	if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
		g.zipped = true
		g.Header().Set("Content-Encoding", "gzip")
		return g.gz.Write(b)
	}
	return g.ResponseWriter.Write(b)
}

func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
	if len(mw) < 1 {
		return h
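For context, a minimal sketch of how the new handler could be wired into a server through MiddlewareChain (both appear in the hunks above). The handler body and listen address are illustrative only, not part of this change.

package main

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v6/api/server/middleware"
)

func main() {
	// Illustrative JSON handler; the Content-Type is what matters here, since
	// gzipResponseWriter only compresses JSON responses.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"ok":true}`))
	})

	// Wrap the handler with the gzip middleware added in the diff above.
	wrapped := middleware.MiddlewareChain(h, []middleware.Middleware{
		middleware.AcceptEncodingHeaderHandler(),
	})

	// Arbitrary example address.
	_ = http.ListenAndServe(":8080", wrapped)
}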
@@ -1,12 +1,16 @@
package middleware

import (
	"bytes"
	"compress/gzip"
	"io"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	log "github.com/sirupsen/logrus"
)

func TestNormalizeQueryValuesHandler(t *testing.T) {
@@ -124,6 +128,89 @@ func TestContentTypeHandler(t *testing.T) {
	}
}

func TestAcceptEncodingHeaderHandler(t *testing.T) {
	dummyContent := "Test gzip middleware content"
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", r.Header.Get("Accept"))
		_, err := w.Write([]byte(dummyContent))
		require.NoError(t, err)
	})

	handler := AcceptEncodingHeaderHandler()(nextHandler)

	tests := []struct {
		name             string
		accept           string
		acceptEncoding   string
		expectCompressed bool
	}{
		{
			name:             "Gzip supported",
			accept:           api.JsonMediaType,
			acceptEncoding:   "gzip",
			expectCompressed: true,
		},
		{
			name:             "Multiple encodings supported",
			accept:           api.JsonMediaType,
			acceptEncoding:   "deflate, gzip",
			expectCompressed: true,
		},
		{
			name:             "Gzip not supported",
			accept:           api.JsonMediaType,
			acceptEncoding:   "deflate",
			expectCompressed: false,
		},
		{
			name:             "No accept encoding header",
			accept:           api.JsonMediaType,
			acceptEncoding:   "",
			expectCompressed: false,
		},
		{
			name:             "SSZ",
			accept:           api.OctetStreamMediaType,
			acceptEncoding:   "gzip",
			expectCompressed: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", "/", nil)
			req.Header.Set("Accept", tt.accept)
			if tt.acceptEncoding != "" {
				req.Header.Set("Accept-Encoding", tt.acceptEncoding)
			}
			rr := httptest.NewRecorder()

			handler.ServeHTTP(rr, req)

			if tt.expectCompressed {
				require.Equal(t, "gzip", rr.Header().Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")

				compressedBody := rr.Body.Bytes()
				require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")

				gzReader, err := gzip.NewReader(bytes.NewReader(compressedBody))
				require.NoError(t, err, "Failed to create gzipReader")
				defer func() {
					if err := gzReader.Close(); err != nil {
						log.WithError(err).Error("Failed to close gzip reader")
					}
				}()

				decompressedBody, err := io.ReadAll(gzReader)
				require.NoError(t, err, "Failed to decompress response body")
				require.Equal(t, dummyContent, string(decompressedBody), "Decompressed content should match the original")
			} else {
				require.Equal(t, dummyContent, rr.Body.String(), "Response body should be uncompressed and match the original")
			}
		})
	}
}

func TestAcceptHeaderHandler(t *testing.T) {
	acceptedTypes := []string{"application/json", "application/octet-stream"}
@@ -12,47 +12,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
)

// ConvertToAltair converts a Phase 0 beacon state to an Altair beacon state.
func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
	epoch := time.CurrentEpoch(state)

	numValidators := state.NumValidators()
	s := &ethpb.BeaconStateAltair{
		GenesisTime: state.GenesisTime(),
		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
		Slot: state.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: state.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().AltairForkVersion,
			Epoch:           epoch,
		},
		LatestBlockHeader: state.LatestBlockHeader(),
		BlockRoots: state.BlockRoots(),
		StateRoots: state.StateRoots(),
		HistoricalRoots: state.HistoricalRoots(),
		Eth1Data: state.Eth1Data(),
		Eth1DataVotes: state.Eth1DataVotes(),
		Eth1DepositIndex: state.Eth1DepositIndex(),
		Validators: state.Validators(),
		Balances: state.Balances(),
		RandaoMixes: state.RandaoMixes(),
		Slashings: state.Slashings(),
		PreviousEpochParticipation: make([]byte, numValidators),
		CurrentEpochParticipation: make([]byte, numValidators),
		JustificationBits: state.JustificationBits(),
		PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint: state.FinalizedCheckpoint(),
		InactivityScores: make([]uint64, numValidators),
	}

	newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
	if err != nil {
		return nil, err
	}
	return newState, nil
}

// UpgradeToAltair updates input state to return the version Altair state.
//
// Spec code:
@@ -105,7 +64,39 @@ func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
//	post.next_sync_committee = get_next_sync_committee(post)
//	return post
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
	newState, err := ConvertToAltair(state)
	epoch := time.CurrentEpoch(state)

	numValidators := state.NumValidators()
	s := &ethpb.BeaconStateAltair{
		GenesisTime: state.GenesisTime(),
		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
		Slot: state.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: state.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().AltairForkVersion,
			Epoch:           epoch,
		},
		LatestBlockHeader: state.LatestBlockHeader(),
		BlockRoots: state.BlockRoots(),
		StateRoots: state.StateRoots(),
		HistoricalRoots: state.HistoricalRoots(),
		Eth1Data: state.Eth1Data(),
		Eth1DataVotes: state.Eth1DataVotes(),
		Eth1DepositIndex: state.Eth1DepositIndex(),
		Validators: state.Validators(),
		Balances: state.Balances(),
		RandaoMixes: state.RandaoMixes(),
		Slashings: state.Slashings(),
		PreviousEpochParticipation: make([]byte, numValidators),
		CurrentEpochParticipation: make([]byte, numValidators),
		JustificationBits: state.JustificationBits(),
		PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint: state.FinalizedCheckpoint(),
		InactivityScores: make([]uint64, numValidators),
	}

	newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
	if err != nil {
		return nil, err
	}
@@ -15,129 +15,6 @@ import (
	"github.com/pkg/errors"
)

// ConvertToElectra converts a Deneb beacon state to an Electra beacon state.
func ConvertToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
	currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	nextSyncCommittee, err := beaconState.NextSyncCommittee()
	if err != nil {
		return nil, err
	}
	prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
	if err != nil {
		return nil, err
	}
	currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
	if err != nil {
		return nil, err
	}
	inactivityScores, err := beaconState.InactivityScores()
	if err != nil {
		return nil, err
	}
	payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
	if err != nil {
		return nil, err
	}
	txRoot, err := payloadHeader.TransactionsRoot()
	if err != nil {
		return nil, err
	}
	wdRoot, err := payloadHeader.WithdrawalsRoot()
	if err != nil {
		return nil, err
	}
	wi, err := beaconState.NextWithdrawalIndex()
	if err != nil {
		return nil, err
	}
	vi, err := beaconState.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, err
	}
	summaries, err := beaconState.HistoricalSummaries()
	if err != nil {
		return nil, err
	}
	excessBlobGas, err := payloadHeader.ExcessBlobGas()
	if err != nil {
		return nil, err
	}
	blobGasUsed, err := payloadHeader.BlobGasUsed()
	if err != nil {
		return nil, err
	}

	s := &ethpb.BeaconStateElectra{
		GenesisTime: beaconState.GenesisTime(),
		GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
		Slot: beaconState.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: beaconState.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().ElectraForkVersion,
			Epoch:           time.CurrentEpoch(beaconState),
		},
		LatestBlockHeader: beaconState.LatestBlockHeader(),
		BlockRoots: beaconState.BlockRoots(),
		StateRoots: beaconState.StateRoots(),
		HistoricalRoots: beaconState.HistoricalRoots(),
		Eth1Data: beaconState.Eth1Data(),
		Eth1DataVotes: beaconState.Eth1DataVotes(),
		Eth1DepositIndex: beaconState.Eth1DepositIndex(),
		Validators: beaconState.Validators(),
		Balances: beaconState.Balances(),
		RandaoMixes: beaconState.RandaoMixes(),
		Slashings: beaconState.Slashings(),
		PreviousEpochParticipation: prevEpochParticipation,
		CurrentEpochParticipation: currentEpochParticipation,
		JustificationBits: beaconState.JustificationBits(),
		PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
		InactivityScores: inactivityScores,
		CurrentSyncCommittee: currentSyncCommittee,
		NextSyncCommittee: nextSyncCommittee,
		LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
			ParentHash: payloadHeader.ParentHash(),
			FeeRecipient: payloadHeader.FeeRecipient(),
			StateRoot: payloadHeader.StateRoot(),
			ReceiptsRoot: payloadHeader.ReceiptsRoot(),
			LogsBloom: payloadHeader.LogsBloom(),
			PrevRandao: payloadHeader.PrevRandao(),
			BlockNumber: payloadHeader.BlockNumber(),
			GasLimit: payloadHeader.GasLimit(),
			GasUsed: payloadHeader.GasUsed(),
			Timestamp: payloadHeader.Timestamp(),
			ExtraData: payloadHeader.ExtraData(),
			BaseFeePerGas: payloadHeader.BaseFeePerGas(),
			BlockHash: payloadHeader.BlockHash(),
			TransactionsRoot: txRoot,
			WithdrawalsRoot: wdRoot,
			ExcessBlobGas: excessBlobGas,
			BlobGasUsed: blobGasUsed,
		},
		NextWithdrawalIndex: wi,
		NextWithdrawalValidatorIndex: vi,
		HistoricalSummaries: summaries,

		DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
		DepositBalanceToConsume: 0,
		EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
		PendingDeposits: make([]*ethpb.PendingDeposit, 0),
		PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
		PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
	}

	// need to cast the beaconState to use in helper functions
	post, err := state_native.InitializeFromProtoUnsafeElectra(s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
	}
	return post, nil
}

// UpgradeToElectra updates inputs a generic state to return the version Electra state.
//
// nolint:dupword
@@ -7,7 +7,6 @@ go_library(
    visibility = [
        "//beacon-chain:__subpackages__",
        "//cmd/prysmctl/testnet:__pkg__",
        "//consensus-types/hdiff:__subpackages__",
        "//testing/spectest:__subpackages__",
        "//validator/client:__pkg__",
    ],
@@ -25,9 +25,6 @@ go_library(
        "migration_state_validators.go",
        "schema.go",
        "state.go",
        "state_diff.go",
        "state_diff_cache.go",
        "state_diff_helpers.go",
        "state_summary.go",
        "state_summary_cache.go",
        "utils.go",
@@ -47,7 +44,6 @@ go_library(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/hdiff:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -55,7 +51,6 @@ go_library(
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//math:go_default_library",
        "//monitoring/progress:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
@@ -99,7 +94,6 @@ go_test(
        "migration_archived_index_test.go",
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
        "state_diff_test.go",
        "state_summary_test.go",
        "state_test.go",
        "utils_test.go",
@@ -122,7 +116,6 @@ go_test(
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -91,7 +91,6 @@ type Store struct {
	blockCache          *ristretto.Cache[string, interfaces.ReadOnlySignedBeaconBlock]
	validatorEntryCache *ristretto.Cache[[]byte, *ethpb.Validator]
	stateSummaryCache   *stateSummaryCache
	stateDiffCache      *stateDiffCache
	ctx                 context.Context
}

@@ -113,7 +112,6 @@ var Buckets = [][]byte{
	lightClientUpdatesBucket,
	lightClientBootstrapBucket,
	lightClientSyncCommitteeBucket,
	stateDiffBucket,
	// Indices buckets.
	blockSlotIndicesBucket,
	stateSlotIndicesBucket,
@@ -184,7 +182,6 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
		blockCache:          blockCache,
		validatorEntryCache: validatorCache,
		stateSummaryCache:   newStateSummaryCache(),
		stateDiffCache:      nil,
		ctx:                 ctx,
	}
	for _, o := range opts {
@@ -203,14 +200,6 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
		return nil, err
	}

	if features.Get().EnableStateDiff {
		sdCache, err := newStateDiffCache(kv)
		if err != nil {
			return nil, err
		}
		kv.stateDiffCache = sdCache
	}

	return kv, nil
}
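The stateDiffBucket entry added to Buckets is presumably created at store initialization alongside the other buckets; that setup code is not part of this diff, so the following is only a generic bbolt sketch of the usual pattern, not Prysm's actual code.

// Generic bbolt pattern (illustrative only): ensure every bucket listed in
// Buckets exists before the store is used.
err := db.Update(func(tx *bolt.Tx) error {
	for _, bucketName := range Buckets {
		if _, err := tx.CreateBucketIfNotExists(bucketName); err != nil {
			return err
		}
	}
	return nil
})
if err != nil {
	return nil, err
}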
@@ -16,7 +16,6 @@ var (
	stateValidatorsBucket = []byte("state-validators")
	feeRecipientBucket    = []byte("fee-recipient")
	registrationBucket    = []byte("registration")
	stateDiffBucket       = []byte("state-diff")

	// Light Client Updates Bucket
	lightClientUpdatesBucket = []byte("light-client-updates")
@@ -1,232 +0,0 @@
package kv

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/hdiff"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

const (
	stateSuffix     = "_s"
	validatorSuffix = "_v"
	balancesSuffix  = "_b"
)

/*
We use a level-based approach to save state diffs. The levels are 0-6, where each level corresponds to an exponent of 2 (exponents[lvl]).
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
*/

// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
	_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
	defer span.End()

	if st == nil {
		return errors.New("state is nil")
	}

	slot := st.Slot()
	offset := s.getOffset()
	if uint64(slot) < offset {
		return ErrSlotBeforeOffset
	}

	// Find the level to save the state.
	lvl := computeLevel(offset, slot)
	if lvl == -1 {
		return nil
	}

	// Save full state if level is 0.
	if lvl == 0 {
		return s.saveFullSnapshot(st)
	}

	// Get anchor state to compute the diff from.
	anchorState, err := s.getAnchorState(offset, lvl, slot)
	if err != nil {
		return err
	}

	err = s.saveHdiff(lvl, anchorState, st)
	if err != nil {
		return err
	}

	return nil
}

// stateByDiff retrieves the full state for a given slot.
func (s *Store) stateByDiff(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
	offset := s.getOffset()
	if uint64(slot) < offset {
		return nil, ErrSlotBeforeOffset
	}

	snapshot, diffChain, err := s.getBaseAndDiffChain(offset, slot)
	if err != nil {
		return nil, err
	}

	for _, diff := range diffChain {
		snapshot, err = hdiff.ApplyDiff(ctx, snapshot, diff)
		if err != nil {
			return nil, err
		}
	}

	return snapshot, nil
}

// SaveHdiff computes the diff between the anchor state and the current state and saves it to the database.
func (s *Store) saveHdiff(lvl int, anchor, st state.ReadOnlyBeaconState) error {
	slot := uint64(st.Slot())
	key := makeKey(lvl, slot)

	diff, err := hdiff.Diff(anchor, st)
	if err != nil {
		return err
	}

	err = s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}
		buf := append(key, stateSuffix...)
		if err := bucket.Put(buf, diff.StateDiff); err != nil {
			return err
		}
		buf = append(key, validatorSuffix...)
		if err := bucket.Put(buf, diff.ValidatorDiffs); err != nil {
			return err
		}
		buf = append(key, balancesSuffix...)
		if err := bucket.Put(buf, diff.BalancesDiff); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Save the full state to the cache (if not the last level).
	if lvl != len(params.StateHierarchyExponents())-1 {
		err = s.stateDiffCache.setAnchor(lvl, st)
		if err != nil {
			return err
		}
	}

	return nil
}

// SaveFullSnapshot saves the full level 0 state snapshot to the database.
func (s *Store) saveFullSnapshot(st state.ReadOnlyBeaconState) error {
	slot := uint64(st.Slot())
	key := makeKey(0, slot)
	stateBytes, err := st.MarshalSSZ()
	if err != nil {
		return err
	}
	// add version key to value
	enc, err := addKey(st.Version(), stateBytes)
	if err != nil {
		return err
	}

	err = s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}

		if err := bucket.Put(key, enc); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}
	// Save the full state to the cache, and invalidate other levels.
	s.stateDiffCache.clearAnchors()
	err = s.stateDiffCache.setAnchor(0, st)
	if err != nil {
		return err
	}

	return nil
}

func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
	key := makeKey(lvl, slot)
	var stateDiff []byte
	var validatorDiff []byte
	var balancesDiff []byte

	err := s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}
		buf := append(key, stateSuffix...)
		stateDiff = bucket.Get(buf)
		if stateDiff == nil {
			return errors.New("state diff not found")
		}
		buf = append(key, validatorSuffix...)
		validatorDiff = bucket.Get(buf)
		if validatorDiff == nil {
			return errors.New("validator diff not found")
		}
		buf = append(key, balancesSuffix...)
		balancesDiff = bucket.Get(buf)
		if balancesDiff == nil {
			return errors.New("balances diff not found")
		}
		return nil
	})
	if err != nil {
		return hdiff.HdiffBytes{}, err
	}

	return hdiff.HdiffBytes{
		StateDiff:      stateDiff,
		ValidatorDiffs: validatorDiff,
		BalancesDiff:   balancesDiff,
	}, nil
}

func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
	key := makeKey(0, slot)
	var enc []byte

	err := s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}
		enc = bucket.Get(key)
		if enc == nil {
			return errors.New("state not found")
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	return s.decodeStateSnapshot(enc)
}
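To make the on-disk layout above concrete: each diff is written under three keys that share the same (level, slot) prefix and differ only in the "_s", "_v", and "_b" suffixes, while level-0 snapshots use the bare prefix. A standalone, illustrative sketch of that key construction (mirroring makeKey, which is defined in the helpers file further below; this is not Prysm code):

package main

import (
	"encoding/binary"
	"fmt"
)

// diffKeys builds the three bucket keys for a diff at the given level and slot:
// a 16-byte prefix (level byte, little-endian slot, zero padding) plus a suffix.
func diffKeys(level byte, slot uint64) (stateKey, validatorKey, balancesKey []byte) {
	prefix := make([]byte, 16)
	prefix[0] = level
	binary.LittleEndian.PutUint64(prefix[1:], slot)
	stateKey = append(append([]byte{}, prefix...), "_s"...)     // serialized state diff
	validatorKey = append(append([]byte{}, prefix...), "_v"...) // validator diffs
	balancesKey = append(append([]byte{}, prefix...), "_b"...)  // balances diff
	return stateKey, validatorKey, balancesKey
}

func main() {
	s, v, b := diffKeys(2, 65536)
	fmt.Printf("%x\n%x\n%x\n", s, v, b)
}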
@@ -1,77 +0,0 @@
package kv

import (
	"encoding/binary"
	"errors"
	"sync"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"go.etcd.io/bbolt"
)

type stateDiffCache struct {
	sync.RWMutex
	anchors []state.ReadOnlyBeaconState
	offset  uint64
}

func newStateDiffCache(s *Store) (*stateDiffCache, error) {
	var offset uint64

	err := s.db.View(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bbolt.ErrBucketNotFound
		}

		offsetBytes := bucket.Get([]byte("offset"))
		if offsetBytes == nil {
			return errors.New("state diff cache: offset not found")
		}
		offset = binary.LittleEndian.Uint64(offsetBytes)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return &stateDiffCache{
		anchors: make([]state.ReadOnlyBeaconState, len(params.StateHierarchyExponents())),
		offset:  offset,
	}, nil
}

func (c *stateDiffCache) getAnchor(level int) state.ReadOnlyBeaconState {
	c.RLock()
	defer c.RUnlock()
	return c.anchors[level]
}

func (c *stateDiffCache) setAnchor(level int, anchor state.ReadOnlyBeaconState) error {
	c.Lock()
	defer c.Unlock()
	if level >= len(c.anchors) || level < 0 {
		return errors.New("state diff cache: anchor level out of range")
	}
	c.anchors[level] = anchor
	return nil
}

func (c *stateDiffCache) getOffset() uint64 {
	c.RLock()
	defer c.RUnlock()
	return c.offset
}

func (c *stateDiffCache) setOffset(offset uint64) {
	c.Lock()
	defer c.Unlock()
	c.offset = offset
}

func (c *stateDiffCache) clearAnchors() {
	c.Lock()
	defer c.Unlock()
	c.anchors = make([]state.ReadOnlyBeaconState, len(params.StateHierarchyExponents()))
}
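The offset loaded above round-trips through the same bucket as the diffs, stored under the literal key "offset" as a little-endian uint64. A tiny standalone sketch of that encoding (illustrative only, not Prysm code):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode an offset slot the way setOffset does: 8 bytes, little-endian.
	offsetBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(offsetBytes, 12345)

	// Decode it the way newStateDiffCache does when loading from the bucket.
	offset := binary.LittleEndian.Uint64(offsetBytes)
	fmt.Println(offset) // 12345
}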
@@ -1,234 +0,0 @@
package kv

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/hdiff"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/math"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"go.etcd.io/bbolt"
)

var (
	offsetKey           = []byte("offset")
	ErrSlotBeforeOffset = errors.New("slot is before root offset")
)

func makeKey(level int, slot uint64) []byte {
	buf := make([]byte, 16)
	buf[0] = byte(level)
	binary.LittleEndian.PutUint64(buf[1:], slot)
	return buf
}

func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (anchor state.ReadOnlyBeaconState, err error) {
	if lvl <= 0 || lvl >= len(params.StateHierarchyExponents()) {
		return nil, errors.New("invalid value for level")
	}

	relSlot := uint64(slot) - offset
	prevExp := params.StateHierarchyExponents()[lvl-1]
	span := math.PowerOf2(prevExp)
	anchorSlot := primitives.Slot((relSlot / span * span) + offset)

	// anchorLvl can be [0, lvl-1]
	anchorLvl := computeLevel(offset, anchorSlot)
	if anchorLvl == -1 {
		return nil, errors.New("could not compute anchor level")
	}

	// Check if we have the anchor in cache.
	anchor = s.stateDiffCache.getAnchor(anchorLvl)
	if anchor != nil {
		return anchor, nil
	}

	// If not, load it from the database.
	anchor, err = s.stateByDiff(context.Background(), anchorSlot)
	if err != nil {
		return nil, err
	}

	// Save it in the cache.
	err = s.stateDiffCache.setAnchor(anchorLvl, anchor)
	if err != nil {
		return nil, err
	}
	return anchor, nil
}

// computeLevel computes the level in the diff tree. Returns -1 in case slot should not be in tree.
func computeLevel(offset uint64, slot primitives.Slot) int {
	rel := uint64(slot) - offset
	for i, exp := range params.StateHierarchyExponents() {
		span := math.PowerOf2(exp)
		if rel%span == 0 {
			return i
		}
	}
	// If rel isn't on any of the boundaries, we should ignore saving it.
	return -1
}

func (s *Store) setOffset(slot primitives.Slot) error {
	err := s.db.Update(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bbolt.ErrBucketNotFound
		}

		offsetBytes := bucket.Get(offsetKey)
		if offsetBytes != nil {
			return fmt.Errorf("offset already set to %d", binary.LittleEndian.Uint64(offsetBytes))
		}

		offsetBytes = make([]byte, 8)
		binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Save the offset in the cache.
	s.stateDiffCache.setOffset(uint64(slot))
	return nil
}

func (s *Store) getOffset() uint64 {
	return s.stateDiffCache.getOffset()
}

func keyForSnapshot(v int) []byte {
	switch v {
	case version.Fulu:
		return fuluKey
	case version.Electra:
		return ElectraKey
	case version.Deneb:
		return denebKey
	case version.Capella:
		return capellaKey
	case version.Bellatrix:
		return bellatrixKey
	case version.Altair:
		return altairKey
	default:
		// Phase0
		return []byte{}
	}
}

func addKey(v int, bytes []byte) ([]byte, error) {
	key := keyForSnapshot(v)
	enc := make([]byte, len(key)+len(bytes))
	copy(enc, key)
	copy(enc[len(key):], bytes)
	return enc, nil
}

func (s *Store) decodeStateSnapshot(enc []byte) (state.BeaconState, error) {
	switch {
	case hasFuluKey(enc):
		var fuluState ethpb.BeaconStateFulu
		if err := fuluState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeFulu(&fuluState)
	case HasElectraKey(enc):
		var electraState ethpb.BeaconStateElectra
		if err := electraState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeElectra(&electraState)
	case hasDenebKey(enc):
		var denebState ethpb.BeaconStateDeneb
		if err := denebState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeDeneb(&denebState)
	case hasCapellaKey(enc):
		var capellaState ethpb.BeaconStateCapella
		if err := capellaState.UnmarshalSSZ(enc[len(capellaKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeCapella(&capellaState)
	case hasBellatrixKey(enc):
		var bellatrixState ethpb.BeaconStateBellatrix
		if err := bellatrixState.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeBellatrix(&bellatrixState)
	case hasAltairKey(enc):
		var altairState ethpb.BeaconStateAltair
		if err := altairState.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeAltair(&altairState)
	default:
		var phase0State ethpb.BeaconState
		if err := phase0State.UnmarshalSSZ(enc); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafePhase0(&phase0State)
	}
}

func (s *Store) getBaseAndDiffChain(offset uint64, slot primitives.Slot) (state.BeaconState, []hdiff.HdiffBytes, error) {
	rel := uint64(slot) - offset
	lvl := computeLevel(offset, slot)
	if lvl == -1 {
		return nil, nil, errors.New("slot not in tree")
	}

	exponents := params.StateHierarchyExponents()

	baseSpan := math.PowerOf2(exponents[0])
	baseAnchorSlot := (rel / baseSpan * baseSpan) + offset

	var diffChainIndices []uint64
	for i := 1; i <= lvl; i++ {
		span := math.PowerOf2(exponents[i])
		diffSlot := rel / span * span
		if diffSlot == baseAnchorSlot {
			continue
		}
		diffChainIndices = appendUnique(diffChainIndices, diffSlot+offset)
	}

	baseSnapshot, err := s.getFullSnapshot(baseAnchorSlot)
	if err != nil {
		return nil, nil, err
	}

	diffChain := make([]hdiff.HdiffBytes, 0, len(diffChainIndices))
	for _, diffSlot := range diffChainIndices {
		diff, err := s.getDiff(computeLevel(offset, primitives.Slot(diffSlot)), diffSlot)
		if err != nil {
			return nil, nil, err
		}
		diffChain = append(diffChain, diff)
	}

	return baseSnapshot, diffChain, nil
}

func appendUnique(s []uint64, v uint64) []uint64 {
	for _, x := range s {
		if x == v {
			return s
		}
	}
	return append(s, v)
}
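A worked example of computeLevel and getBaseAndDiffChain, as a standalone sketch. The exponents (21, 18, 16, 13, 11, 9, 5) are an assumption inferred from the values exercised in the test file that follows, standing in for whatever StateHierarchyExponents returns; the offset, slot, and chain logic mirror the helpers above.

package main

import "fmt"

// Assumed hierarchy exponents, inferred from the tests below.
var exponents = []uint64{21, 18, 16, 13, 11, 9, 5}

// computeLevel mirrors the helper above: the first exponent whose span divides
// the relative slot wins; -1 means the slot is not stored at all.
func computeLevel(offset, slot uint64) int {
	rel := slot - offset
	for i, exp := range exponents {
		if rel%(1<<exp) == 0 {
			return i
		}
	}
	return -1
}

func main() {
	offset := uint64(0)
	slot := uint64(1<<11 + 1<<9 + 1<<5) // 2624, which lands on level 6

	lvl := computeLevel(offset, slot)
	fmt.Println("level:", lvl) // 6

	// Reconstruction mirrors getBaseAndDiffChain: start from the level-0 full
	// snapshot, then apply one diff per level whose anchor differs from the base.
	rel := slot - offset
	base := rel / (1 << exponents[0]) * (1 << exponents[0]) // 0
	fmt.Println("base snapshot slot:", base+offset)

	var chain []uint64
	for i := 1; i <= lvl; i++ {
		d := rel/(1<<exponents[i])*(1<<exponents[i]) + offset
		if d == base+offset {
			continue
		}
		// Anchor slots are non-decreasing, so checking the last entry is
		// equivalent to appendUnique in the original helper.
		if len(chain) == 0 || chain[len(chain)-1] != d {
			chain = append(chain, d)
		}
	}
	fmt.Println("diff chain slots:", chain) // [2048 2560 2624]
}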
@@ -1,577 +0,0 @@
package kv

import (
	"context"
	"encoding/binary"
	"fmt"
	"math/rand"
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/math"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"go.etcd.io/bbolt"
)

func TestStateDiff_LoadOrInitOffset(t *testing.T) {
	db := setupDB(t)
	err := setOffsetInDB(db, 10)
	require.NoError(t, err)
	offset := db.getOffset()
	require.Equal(t, uint64(10), offset)

	err = db.setOffset(10)
	require.ErrorContains(t, "offset already set", err)
	offset = db.getOffset()
	require.Equal(t, uint64(10), offset)
}

func TestStateDiff_ComputeLevel(t *testing.T) {
	db := setupDB(t)

	err := setOffsetInDB(db, 0)
	require.NoError(t, err)

	offset := db.getOffset()

	// 2 ** 21
	lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
	require.Equal(t, 0, lvl)

	// 2 ** 21 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)*3))
	require.Equal(t, 0, lvl)

	// 2 ** 18
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(18)))
	require.Equal(t, 1, lvl)

	// 2 ** 18 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(18)*3))
	require.Equal(t, 1, lvl)

	// 2 ** 16
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(16)))
	require.Equal(t, 2, lvl)

	// 2 ** 16 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(16)*3))
	require.Equal(t, 2, lvl)

	// 2 ** 13
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(13)))
	require.Equal(t, 3, lvl)

	// 2 ** 13 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(13)*3))
	require.Equal(t, 3, lvl)

	// 2 ** 11
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(11)))
	require.Equal(t, 4, lvl)

	// 2 ** 11 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(11)*3))
	require.Equal(t, 4, lvl)

	// 2 ** 9
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(9)))
	require.Equal(t, 5, lvl)

	// 2 ** 9 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(9)*3))
	require.Equal(t, 5, lvl)

	// 2 ** 5
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)))
	require.Equal(t, 6, lvl)

	// 2 ** 5 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)*3))
	require.Equal(t, 6, lvl)

	// 2 ** 7
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(7)))
	require.Equal(t, 6, lvl)

	// 2 ** 5 + 1
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+1))
	require.Equal(t, -1, lvl)

	// 2 ** 5 + 16
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+16))
	require.Equal(t, -1, lvl)

	// 2 ** 5 + 32
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+32))
	require.Equal(t, 6, lvl)
}

func TestStateDiff_SaveFullSnapshot(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			// Create state with slot 0
			st, enc := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			err = db.db.View(func(tx *bbolt.Tx) error {
				bucket := tx.Bucket(stateDiffBucket)
				if bucket == nil {
					return bbolt.ErrBucketNotFound
				}
				s := bucket.Get(makeKey(0, uint64(0)))
				if s == nil {
					return bbolt.ErrIncompatibleValue
				}
				require.DeepSSZEqual(t, enc, s)
				return nil
			})
			require.NoError(t, err)
		})
	}
}

func TestStateDiff_SaveAndReadFullSnapshot(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), 0)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_SaveDiff(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			// Create state with slot 2**21
			slot := primitives.Slot(math.PowerOf2(21))
			st, enc := createState(t, slot, v)

			err := setOffsetInDB(db, uint64(slot))
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			err = db.db.View(func(tx *bbolt.Tx) error {
				bucket := tx.Bucket(stateDiffBucket)
				if bucket == nil {
					return bbolt.ErrBucketNotFound
				}
				s := bucket.Get(makeKey(0, uint64(slot)))
				if s == nil {
					return bbolt.ErrIncompatibleValue
				}
				require.DeepSSZEqual(t, enc, s)
				return nil
			})
			require.NoError(t, err)

			// create state with slot 2**18 (+2**21)
			slot = primitives.Slot(math.PowerOf2(18) + math.PowerOf2(21))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			key := makeKey(1, uint64(slot))
			err = db.db.View(func(tx *bbolt.Tx) error {
				bucket := tx.Bucket(stateDiffBucket)
				if bucket == nil {
					return bbolt.ErrBucketNotFound
				}
				buf := append(key, "_s"...)
				s := bucket.Get(buf)
				if s == nil {
					return bbolt.ErrIncompatibleValue
				}
				buf = append(key, "_v"...)
				v := bucket.Get(buf)
				if v == nil {
					return bbolt.ErrIncompatibleValue
				}
				buf = append(key, "_b"...)
				b := bucket.Get(buf)
				if b == nil {
					return bbolt.ErrIncompatibleValue
				}
				return nil
			})
			require.NoError(t, err)
		})
	}
}

func TestStateDiff_SaveAndReadDiff(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			slot := primitives.Slot(math.PowerOf2(5))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_SaveAndReadDiff_MultipleLevels(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			slot := primitives.Slot(math.PowerOf2(11))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)

			slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(9))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err = db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err = st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err = readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)

			slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(9) + math.PowerOf2(5))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err = db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err = st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err = readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_SaveAndReadDiffForkTransition(t *testing.T) {
	// test for every version
	for v := 0; v < 5; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			slot := primitives.Slot(math.PowerOf2(5))
			st, _ = createState(t, slot, v+1)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_OffsetCache(t *testing.T) {
	// test for slot numbers 0 and 1 for every version
	for slotNum := 0; slotNum < 2; slotNum++ {
		// test for every version
		for v := 0; v < 6; v++ {
			t.Run(fmt.Sprintf("slotNum=%d,%s", slotNum, version.String(v)), func(t *testing.T) {
				db := setupDB(t)

				slot := primitives.Slot(slotNum)
				err := setOffsetInDB(db, uint64(slot))
				require.NoError(t, err)
				st, _ := createState(t, slot, v)
				err = db.saveStateByDiff(context.Background(), st)
				require.NoError(t, err)

				offset := db.stateDiffCache.getOffset()
				require.Equal(t, uint64(slotNum), offset)

				slot2 := primitives.Slot(uint64(slotNum) + math.PowerOf2(params.StateHierarchyExponents()[0]))
				st2, _ := createState(t, slot2, v)
				err = db.saveStateByDiff(context.Background(), st2)
				require.NoError(t, err)

				offset = db.stateDiffCache.getOffset()
				require.Equal(t, uint64(slot), offset)
			})
		}
	}
}

func TestStateDiff_AnchorCache(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			exponents := params.StateHierarchyExponents()
			localCache := make([]state.ReadOnlyBeaconState, len(exponents)-1)
			db := setupDB(t)
			err := setOffsetInDB(db, 0) // lvl 0
			require.NoError(t, err)

			// at first the cache should be empty
			for i := 0; i < len(params.StateHierarchyExponents()); i++ {
				anchor := db.stateDiffCache.getAnchor(i)
				require.IsNil(t, anchor)
			}

			// add level 0
			slot := primitives.Slot(0) // offset 0 is already set
			st, _ := createState(t, slot, v)
			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)
			localCache[0] = st

			// level 0 should be the same
			require.DeepEqual(t, localCache[0], db.stateDiffCache.getAnchor(0))

			// rest of the cache should be nil
			for i := 1; i < len(exponents)-1; i++ {
				require.IsNil(t, db.stateDiffCache.getAnchor(i))
			}

			// skip last level as it does not get cached
			for i := len(exponents) - 2; i > 0; i-- {
				slot = primitives.Slot(math.PowerOf2(exponents[i]))
				st, _ := createState(t, slot, v)
				err = db.saveStateByDiff(context.Background(), st)
				require.NoError(t, err)
				localCache[i] = st

				// anchor cache must match local cache
				for i := 0; i < len(exponents)-1; i++ {
					if localCache[i] == nil {
						require.IsNil(t, db.stateDiffCache.getAnchor(i))
						continue
					}
					localSSZ, err := localCache[i].MarshalSSZ()
					require.NoError(t, err)
					anchorSSZ, err := db.stateDiffCache.getAnchor(i).MarshalSSZ()
					require.NoError(t, err)
					require.DeepSSZEqual(t, localSSZ, anchorSSZ)
				}
			}

			// moving to a new tree should invalidate the cache except for level 0
			twoTo21 := math.PowerOf2(21)
			slot = primitives.Slot(twoTo21)
			st, _ = createState(t, slot, v)
			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)
			localCache = make([]state.ReadOnlyBeaconState, len(exponents)-1)
			localCache[0] = st

			// level 0 should be the same
			require.DeepEqual(t, localCache[0], db.stateDiffCache.getAnchor(0))

			// rest of the cache should be nil
			for i := 1; i < len(exponents)-1; i++ {
				require.IsNil(t, db.stateDiffCache.getAnchor(i))
			}
		})
	}
}

func createState(t *testing.T, slot primitives.Slot, v int) (state.ReadOnlyBeaconState, []byte) {
	p := params.BeaconConfig()
	var st state.BeaconState
	var err error
	switch v {
	case version.Altair:
		st, err = util.NewBeaconStateAltair()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.GenesisForkVersion,
			CurrentVersion:  p.AltairForkVersion,
			Epoch:           p.AltairForkEpoch,
		})
		require.NoError(t, err)
	case version.Bellatrix:
		st, err = util.NewBeaconStateBellatrix()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.AltairForkVersion,
			CurrentVersion:  p.BellatrixForkVersion,
			Epoch:           p.BellatrixForkEpoch,
		})
		require.NoError(t, err)
	case version.Capella:
		st, err = util.NewBeaconStateCapella()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.BellatrixForkVersion,
			CurrentVersion:  p.CapellaForkVersion,
			Epoch:           p.CapellaForkEpoch,
		})
		require.NoError(t, err)
	case version.Deneb:
		st, err = util.NewBeaconStateDeneb()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.CapellaForkVersion,
			CurrentVersion:  p.DenebForkVersion,
			Epoch:           p.DenebForkEpoch,
		})
		require.NoError(t, err)
	case version.Electra:
		st, err = util.NewBeaconStateElectra()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.DenebForkVersion,
			CurrentVersion:  p.ElectraForkVersion,
			Epoch:           p.ElectraForkEpoch,
		})
		require.NoError(t, err)
	default:
		st, err = util.NewBeaconState()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.GenesisForkVersion,
			CurrentVersion:  p.GenesisForkVersion,
			Epoch:           0,
		})
		require.NoError(t, err)
	}

	err = st.SetSlot(slot)
	require.NoError(t, err)
	slashings := make([]uint64, 8192)
	slashings[0] = uint64(rand.Intn(10))
	err = st.SetSlashings(slashings)
	require.NoError(t, err)
	stssz, err := st.MarshalSSZ()
	require.NoError(t, err)
	enc, err := addKey(v, stssz)
	require.NoError(t, err)
	return st, enc
}

func setOffsetInDB(s *Store, offset uint64) error {
	err := s.db.Update(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bbolt.ErrBucketNotFound
		}

		offsetBytes := bucket.Get(offsetKey)
		if offsetBytes != nil {
			return fmt.Errorf("offset already set to %d", binary.LittleEndian.Uint64(offsetBytes))
		}

		offsetBytes = make([]byte, 8)
		binary.LittleEndian.PutUint64(offsetBytes, offset)
		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	sdCache, err := newStateDiffCache(s)
	if err != nil {
		return err
	}
	s.stateDiffCache = sdCache
	return nil
}
@@ -22,52 +22,50 @@ const (
 	SchemaVersionV3 = "/3"
 )
 
-const (
-	// Specifies the protocol prefix for all our Req/Resp topics.
-	protocolPrefix = "/eth2/beacon_chain/req"
-	// StatusMessageName specifies the name for the status message topic.
-	StatusMessageName = "/status"
-	// GoodbyeMessageName specifies the name for the goodbye message topic.
-	GoodbyeMessageName = "/goodbye"
-	// BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
-	BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"
-	// BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
-	BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"
-	// PingMessageName Specifies the name for the ping message topic.
-	PingMessageName = "/ping"
-	// MetadataMessageName specifies the name for the metadata message topic.
-	MetadataMessageName = "/metadata"
-	// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
-	BlobSidecarsByRangeName = "/blob_sidecars_by_range"
-	// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
-	BlobSidecarsByRootName = "/blob_sidecars_by_root"
-	// LightClientBootstrapName is the name for the LightClientBootstrap message topic,
-	LightClientBootstrapName = "/light_client_bootstrap"
-	// LightClientUpdatesByRangeName is the name for the LightClientUpdatesByRange topic.
-	LightClientUpdatesByRangeName = "/light_client_updates_by_range"
-	// LightClientFinalityUpdateName is the name for the LightClientFinalityUpdate topic.
-	LightClientFinalityUpdateName = "/light_client_finality_update"
-	// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
-	LightClientOptimisticUpdateName = "/light_client_optimistic_update"
-	// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
-	DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
-	// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
-	DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
-)
+// Specifies the protocol prefix for all our Req/Resp topics.
+const protocolPrefix = "/eth2/beacon_chain/req"
+
+// StatusMessageName specifies the name for the status message topic.
+const StatusMessageName = "/status"
+
+// GoodbyeMessageName specifies the name for the goodbye message topic.
+const GoodbyeMessageName = "/goodbye"
+
+// BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
+const BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"
+
+// BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
+const BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"
+
+// PingMessageName Specifies the name for the ping message topic.
+const PingMessageName = "/ping"
+
+// MetadataMessageName specifies the name for the metadata message topic.
+const MetadataMessageName = "/metadata"
+
+// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
+const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
+
+// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
+const BlobSidecarsByRootName = "/blob_sidecars_by_root"
+
+// LightClientBootstrapName is the name for the LightClientBootstrap message topic,
+const LightClientBootstrapName = "/light_client_bootstrap"
+
+// LightClientUpdatesByRangeName is the name for the LightClientUpdatesByRange topic.
+const LightClientUpdatesByRangeName = "/light_client_updates_by_range"
+
+// LightClientFinalityUpdateName is the name for the LightClientFinalityUpdate topic.
+const LightClientFinalityUpdateName = "/light_client_finality_update"
+
+// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
+const LightClientOptimisticUpdateName = "/light_client_optimistic_update"
+
+// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
+const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
+
+// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
+const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
 const (
 	// V1 RPC Topics
@@ -103,9 +101,6 @@ const (
 	// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root.
 	// /eth2/beacon_chain/req/data_column_sidecars_by_root/1 - New in Fulu.
 	RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
-	// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
-	// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
-	RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
 
 	// V2 RPC Topics
 	// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
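As the comment in the hunk above already spells out for the data column topic, a full Req/Resp topic is just the protocol prefix, a message name, and a schema version concatenated. Two more evaluated examples for reference; the constant names used here are hypothetical, only the resulting strings come from the source:

```go
// Evaluates to "/eth2/beacon_chain/req/status/1".
const exampleStatusTopicV1 = protocolPrefix + StatusMessageName + SchemaVersionV1

// Evaluates to "/eth2/beacon_chain/req/metadata/3".
const exampleMetadataTopicV3 = protocolPrefix + MetadataMessageName + SchemaVersionV3
```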
@@ -126,103 +121,91 @@ const (
 )
 
 // RPCTopicMappings map the base message type to the rpc request.
-var (
-	RPCTopicMappings = map[string]interface{}{
-		// RPC Status Message
-		RPCStatusTopicV1: new(pb.Status),
-		// RPC Goodbye Message
-		RPCGoodByeTopicV1: new(primitives.SSZUint64),
-		// RPC Block By Range Message
-		RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
-		RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),
-		// RPC Block By Root Message
-		RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
-		RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),
-		// RPC Ping Message
-		RPCPingTopicV1: new(primitives.SSZUint64),
-		// RPC Metadata Message
-		RPCMetaDataTopicV1: new(interface{}),
-		RPCMetaDataTopicV2: new(interface{}),
-		RPCMetaDataTopicV3: new(interface{}),
-		// BlobSidecarsByRange v1 Message
-		RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
-		// BlobSidecarsByRoot v1 Message
-		RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
-		// Light client
-		RPCLightClientBootstrapTopicV1: new([fieldparams.RootLength]byte),
-		RPCLightClientUpdatesByRangeTopicV1: new(pb.LightClientUpdatesByRangeRequest),
-		RPCLightClientFinalityUpdateTopicV1: new(interface{}),
-		RPCLightClientOptimisticUpdateTopicV1: new(interface{}),
-		// DataColumnSidecarsByRange v1 Message
-		RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
-		// DataColumnSidecarsByRoot v1 Message
-		RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
-	}
-
-	// Maps all registered protocol prefixes.
-	protocolMapping = map[string]bool{
-		protocolPrefix: true,
-	}
-
-	// Maps all the protocol message names for the different rpc topics.
-	messageMapping = map[string]bool{
-		StatusMessageName:               true,
-		GoodbyeMessageName:              true,
-		BeaconBlocksByRangeMessageName:  true,
-		BeaconBlocksByRootsMessageName:  true,
-		PingMessageName:                 true,
-		MetadataMessageName:             true,
-		BlobSidecarsByRangeName:         true,
-		BlobSidecarsByRootName:          true,
-		LightClientBootstrapName:        true,
-		LightClientUpdatesByRangeName:   true,
-		LightClientFinalityUpdateName:   true,
-		LightClientOptimisticUpdateName: true,
-		DataColumnSidecarsByRootName:    true,
-		DataColumnSidecarsByRangeName:   true,
-	}
-
-	// Maps all the RPC messages which are to updated in altair.
-	altairMapping = map[string]string{
-		BeaconBlocksByRangeMessageName: SchemaVersionV2,
-		BeaconBlocksByRootsMessageName: SchemaVersionV2,
-		MetadataMessageName:            SchemaVersionV2,
-	}
-
-	// Maps all the RPC messages which are to updated in fulu.
-	fuluMapping = map[string]string{
-		MetadataMessageName: SchemaVersionV3,
-	}
-
-	versionMapping = map[string]bool{
-		SchemaVersionV1: true,
-		SchemaVersionV2: true,
-		SchemaVersionV3: true,
-	}
-
-	// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
-	// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
-	// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
-	// was introduced. For other RPC methods, context bytes are always required.
-	OmitContextBytesV1 = map[string]bool{
-		StatusMessageName:              true,
-		GoodbyeMessageName:             true,
-		BeaconBlocksByRangeMessageName: true,
-		BeaconBlocksByRootsMessageName: true,
-		PingMessageName:                true,
-		MetadataMessageName:            true,
-	}
-)
+var RPCTopicMappings = map[string]interface{}{
+	// RPC Status Message
+	RPCStatusTopicV1: new(pb.Status),
+	// RPC Goodbye Message
+	RPCGoodByeTopicV1: new(primitives.SSZUint64),
+	// RPC Block By Range Message
+	RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
+	RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),
+	// RPC Block By Root Message
+	RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
+	RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),
+	// RPC Ping Message
+	RPCPingTopicV1: new(primitives.SSZUint64),
+	// RPC Metadata Message
+	RPCMetaDataTopicV1: new(interface{}),
+	RPCMetaDataTopicV2: new(interface{}),
+	RPCMetaDataTopicV3: new(interface{}),
+	// BlobSidecarsByRange v1 Message
+	RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
+	// BlobSidecarsByRoot v1 Message
+	RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
+	// Light client
+	RPCLightClientBootstrapTopicV1: new([fieldparams.RootLength]byte),
+	RPCLightClientUpdatesByRangeTopicV1: new(pb.LightClientUpdatesByRangeRequest),
+	RPCLightClientFinalityUpdateTopicV1: new(interface{}),
+	RPCLightClientOptimisticUpdateTopicV1: new(interface{}),
+	// DataColumnSidecarsByRoot v1 Message
+	RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
+}
+
+// Maps all registered protocol prefixes.
+var protocolMapping = map[string]bool{
+	protocolPrefix: true,
+}
+
+// Maps all the protocol message names for the different rpc
+// topics.
+var messageMapping = map[string]bool{
+	StatusMessageName:               true,
+	GoodbyeMessageName:              true,
+	BeaconBlocksByRangeMessageName:  true,
+	BeaconBlocksByRootsMessageName:  true,
+	PingMessageName:                 true,
+	MetadataMessageName:             true,
+	BlobSidecarsByRangeName:         true,
+	BlobSidecarsByRootName:          true,
+	LightClientBootstrapName:        true,
+	LightClientUpdatesByRangeName:   true,
+	LightClientFinalityUpdateName:   true,
+	LightClientOptimisticUpdateName: true,
+	DataColumnSidecarsByRootName:    true,
+}
+
+// Maps all the RPC messages which are to updated in altair.
+var altairMapping = map[string]bool{
+	BeaconBlocksByRangeMessageName: true,
+	BeaconBlocksByRootsMessageName: true,
+	MetadataMessageName:            true,
+}
+
+// Maps all the RPC messages which are to updated in fulu.
+var fuluMapping = map[string]bool{
+	MetadataMessageName: true,
+}
+
+var versionMapping = map[string]bool{
+	SchemaVersionV1: true,
+	SchemaVersionV2: true,
+	SchemaVersionV3: true,
+}
+
+// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
+// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
+// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
+// was introduced. For other RPC methods, context bytes are always required.
+var OmitContextBytesV1 = map[string]bool{
+	StatusMessageName:              true,
+	GoodbyeMessageName:             true,
+	BeaconBlocksByRangeMessageName: true,
+	BeaconBlocksByRootsMessageName: true,
+	PingMessageName:                true,
+	MetadataMessageName:            true,
+}
 
 // VerifyTopicMapping verifies that the topic and its accompanying
 // message type is correct.
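As a rough illustration of what the OmitContextBytesV1 comment describes, only the phase0-era v1 topics listed in that map skip the [4]byte context identifier; every other topic writes it before the payload so the receiver knows which schema to decode with. The helper below is hypothetical, not Prysm's API, assumes it lives in the same package (so OmitContextBytesV1 and SchemaVersionV1 are in scope and io is imported), and deliberately leaves out the rest of the real chunk framing (result byte, length prefix, snappy compression).

```go
// writeMaybeWithContext sketches only the context-byte decision.
func writeMaybeWithContext(w io.Writer, messageName, topicVersion string, forkDigest [4]byte, sszPayload []byte) error {
	// Only the phase0-era v1 incarnations listed in OmitContextBytesV1 are
	// written bare; everything else is prefixed with the 4-byte identifier.
	if !(topicVersion == SchemaVersionV1 && OmitContextBytesV1[messageName]) {
		if _, err := w.Write(forkDigest[:]); err != nil {
			return err
		}
	}
	_, err := w.Write(sszPayload)
	return err
}
```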
@@ -344,17 +327,13 @@ func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
 	beaconConfig := params.BeaconConfig()
 
 	// Check if the message is to be updated in fulu.
-	if epoch >= beaconConfig.FuluForkEpoch {
-		if version, ok := fuluMapping[msg]; ok {
-			return protocolPrefix + msg + version, nil
-		}
-	}
+	if epoch >= beaconConfig.FuluForkEpoch && fuluMapping[msg] {
+		return protocolPrefix + msg + SchemaVersionV3, nil
+	}
 
 	// Check if the message is to be updated in altair.
-	if epoch >= beaconConfig.AltairForkEpoch {
-		if version, ok := altairMapping[msg]; ok {
-			return protocolPrefix + msg + version, nil
-		}
-	}
+	if epoch >= beaconConfig.AltairForkEpoch && altairMapping[msg] {
+		return protocolPrefix + msg + SchemaVersionV2, nil
+	}
 
 	return protocolPrefix + msg + SchemaVersionV1, nil
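For orientation, the boolean fork mappings above resolve to the same topic strings the previous test code asserted explicitly:

```go
// GoodbyeMessageName, never remapped:                 "/eth2/beacon_chain/req/goodbye/1"
// BeaconBlocksByRangeMessageName, from Altair onward: "/eth2/beacon_chain/req/beacon_blocks_by_range/2"
// MetadataMessageName, from Altair onward:            "/eth2/beacon_chain/req/metadata/2"
// MetadataMessageName, from Fulu onward:              "/eth2/beacon_chain/req/metadata/3"
```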
@@ -119,31 +119,50 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
 	})
 
 	t.Run("after altair fork but before fulu fork", func(t *testing.T) {
-		// Not modified in altair fork.
-		topic, err := TopicFromMessage(GoodbyeMessageName, altairForkEpoch)
-		require.NoError(t, err)
-		require.Equal(t, "/eth2/beacon_chain/req/goodbye/1", topic)
-
-		// Modified in altair fork.
-		topic, err = TopicFromMessage(MetadataMessageName, altairForkEpoch)
-		require.NoError(t, err)
-		require.Equal(t, "/eth2/beacon_chain/req/metadata/2", topic)
+		for m := range messageMapping {
+			topic, err := TopicFromMessage(m, altairForkEpoch)
+			require.NoError(t, err)
+
+			if altairMapping[m] {
+				require.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
+				_, _, version, err := TopicDeconstructor(topic)
+				require.NoError(t, err)
+				require.Equal(t, SchemaVersionV2, version)
+				continue
+			}
+
+			require.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
+			_, _, version, err := TopicDeconstructor(topic)
+			require.NoError(t, err)
+			require.Equal(t, SchemaVersionV1, version)
+		}
 	})
 
 	t.Run("after fulu fork", func(t *testing.T) {
-		// Not modified in any fork.
-		topic, err := TopicFromMessage(GoodbyeMessageName, fuluForkEpoch)
-		require.NoError(t, err)
-		require.Equal(t, "/eth2/beacon_chain/req/goodbye/1", topic)
-
-		// Modified in altair fork.
-		topic, err = TopicFromMessage(BeaconBlocksByRangeMessageName, fuluForkEpoch)
-		require.NoError(t, err)
-		require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)
-
-		// Modified both in altair and fulu fork.
-		topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
-		require.NoError(t, err)
-		require.Equal(t, "/eth2/beacon_chain/req/metadata/3", topic)
+		for m := range messageMapping {
+			topic, err := TopicFromMessage(m, fuluForkEpoch)
+			require.NoError(t, err)
+
+			if fuluMapping[m] {
+				require.Equal(t, true, strings.Contains(topic, SchemaVersionV3))
+				_, _, version, err := TopicDeconstructor(topic)
+				require.NoError(t, err)
+				require.Equal(t, SchemaVersionV3, version)
+				continue
+			}
+
+			if altairMapping[m] {
+				require.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
+				_, _, version, err := TopicDeconstructor(topic)
+				require.NoError(t, err)
+				require.Equal(t, SchemaVersionV2, version)
+				continue
+			}
+
+			require.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
+			_, _, version, err := TopicDeconstructor(topic)
+			require.NoError(t, err)
+			require.Equal(t, SchemaVersionV1, version)
+		}
 	})
 }
@@ -130,6 +130,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
 			name: namespace + ".BlockRewards",
 			middleware: []middleware.Middleware{
 				middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
+				middleware.AcceptEncodingHeaderHandler(),
 			},
 			handler: server.BlockRewards,
 			methods: []string{http.MethodGet},
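On the client side, the effect of registering AcceptEncodingHeaderHandler on these JSON endpoints can be checked by requesting one of them with an explicit gzip preference. The sketch below is illustrative only: the host, port, and endpoint path are placeholders, and setting Accept-Encoding explicitly disables Go's transparent decompression so the Content-Encoding response header stays visible.

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Placeholder URL; any of the JSON endpoints registered above would do.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:3500/eth/v1/beacon/rewards/blocks/head", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var body io.Reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "gzip" {
		zr, err := gzip.NewReader(resp.Body)
		if err != nil {
			log.Fatal(err)
		}
		defer zr.Close()
		body = zr
	}
	b, err := io.ReadAll(body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Header.Get("Content-Encoding"), len(b))
}
```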
@@ -140,6 +141,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.AttestationRewards,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -150,6 +152,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SyncCommitteeRewards,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -172,6 +175,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
|
||||
name: namespace + ".ExpectedWithdrawals",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ExpectedWithdrawals,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -194,6 +198,7 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
name: namespace + ".Blobs",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.Blobs,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -237,6 +242,7 @@ func (s *Service) validatorEndpoints(
|
||||
name: namespace + ".GetAggregateAttestation",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetAggregateAttestation,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -246,6 +252,7 @@ func (s *Service) validatorEndpoints(
|
||||
name: namespace + ".GetAggregateAttestationV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetAggregateAttestationV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -256,6 +263,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitContributionAndProofs,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -267,6 +275,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitAggregateAndProofs,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -277,6 +286,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitAggregateAndProofsV2,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -286,6 +296,7 @@ func (s *Service) validatorEndpoints(
|
||||
name: namespace + ".ProduceSyncCommitteeContribution",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ProduceSyncCommitteeContribution,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -296,6 +307,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitSyncCommitteeSubscription,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -306,6 +318,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitBeaconCommitteeSubscription,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -315,6 +328,7 @@ func (s *Service) validatorEndpoints(
|
||||
name: namespace + ".GetAttestationData",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetAttestationData,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -325,6 +339,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.RegisterValidator,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -335,6 +350,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetAttesterDuties,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -344,6 +360,7 @@ func (s *Service) validatorEndpoints(
|
||||
name: namespace + ".GetProposerDuties",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetProposerDuties,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -354,6 +371,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetSyncCommitteeDuties,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -364,6 +382,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PrepareBeaconProposer,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -374,6 +393,7 @@ func (s *Service) validatorEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetLiveness,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -383,6 +403,7 @@ func (s *Service) validatorEndpoints(
|
||||
name: namespace + ".ProduceBlockV3",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ProduceBlockV3,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -429,6 +450,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetSyncStatus",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetSyncStatus,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -438,6 +460,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetIdentity",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetIdentity,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -447,6 +470,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetPeer",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPeer,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -456,6 +480,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetPeers",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPeers,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -465,6 +490,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetPeerCount",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPeerCount,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -474,6 +500,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetVersion",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetVersion,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -483,6 +510,7 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
name: namespace + ".GetHealth",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetHealth,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -533,6 +561,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetCommittees",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetCommittees,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -542,6 +571,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetStateFork",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetStateFork,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -551,6 +581,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetStateRoot",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetStateRoot,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -560,6 +591,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetSyncCommittees",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetSyncCommittees,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -569,6 +601,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetRandao",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetRandao,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -580,6 +613,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlock,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -591,6 +625,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlindedBlock,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -601,6 +636,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlockV2,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -611,6 +647,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlindedBlockV2,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -620,6 +657,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlockV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlockV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -630,6 +668,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlockAttestations",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlockAttestations,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -639,6 +678,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlockAttestationsV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlockAttestationsV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -648,6 +688,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlindedBlock",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlindedBlock,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -657,6 +698,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlockRoot",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlockRoot,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -667,6 +709,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".ListAttestations",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ListAttestations,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -676,6 +719,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".ListAttestationsV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ListAttestationsV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -686,6 +730,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitAttestations,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -696,6 +741,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitAttestationsV2,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -705,6 +751,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".ListVoluntaryExits",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ListVoluntaryExits,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -715,6 +762,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitVoluntaryExit,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -725,6 +773,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitSyncCommitteeSignatures,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -734,6 +783,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".ListBLSToExecutionChanges",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ListBLSToExecutionChanges,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -744,6 +794,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitBLSToExecutionChanges,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -754,6 +805,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetAttesterSlashings",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetAttesterSlashings,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -763,6 +815,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetAttesterSlashingsV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetAttesterSlashingsV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -773,6 +826,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitAttesterSlashings,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -783,6 +837,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitAttesterSlashingsV2,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -792,6 +847,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetProposerSlashings",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetProposerSlashings,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -802,6 +858,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.SubmitProposerSlashing,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -811,6 +868,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlockHeaders",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlockHeaders,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -820,6 +878,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetBlockHeader",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlockHeader,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -829,6 +888,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetGenesis",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetGenesis,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -838,6 +898,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetFinalityCheckpoints",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetFinalityCheckpoints,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -848,6 +909,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetValidators,
|
||||
methods: []string{http.MethodGet, http.MethodPost},
|
||||
@@ -857,6 +919,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetValidator",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetValidator,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -867,6 +930,7 @@ func (s *Service) beaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetValidatorBalances,
|
||||
methods: []string{http.MethodGet, http.MethodPost},
|
||||
@@ -887,6 +951,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetDepositSnapshot",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetDepositSnapshot,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -896,6 +961,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetPendingDeposits",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPendingDeposits,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -914,6 +980,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".GetPendingPartialWithdrawals",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPendingPartialWithdrawals,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -929,6 +996,7 @@ func (*Service) configEndpoints() []endpoint {
|
||||
name: namespace + ".GetDepositContract",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: config.GetDepositContract,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -938,6 +1006,7 @@ func (*Service) configEndpoints() []endpoint {
|
||||
name: namespace + ".GetForkSchedule",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: config.GetForkSchedule,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -947,6 +1016,7 @@ func (*Service) configEndpoints() []endpoint {
|
||||
name: namespace + ".GetSpec",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: config.GetSpec,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -971,6 +1041,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
|
||||
name: namespace + ".GetLightClientBootstrap",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetLightClientBootstrap,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -980,6 +1051,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
|
||||
name: namespace + ".GetLightClientUpdatesByRange",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetLightClientUpdatesByRange,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -989,6 +1061,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
|
||||
name: namespace + ".GetLightClientFinalityUpdate",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetLightClientFinalityUpdate,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -998,6 +1071,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
|
||||
name: namespace + ".GetLightClientOptimisticUpdate",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetLightClientOptimisticUpdate,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1024,6 +1098,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
|
||||
name: namespace + ".GetBeaconStateV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBeaconStateV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1033,6 +1108,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
|
||||
name: namespace + ".GetForkChoiceHeadsV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetForkChoiceHeadsV2,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1042,6 +1118,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
|
||||
name: namespace + ".GetForkChoice",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetForkChoice,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1066,6 +1143,7 @@ func (s *Service) eventsEndpoints() []endpoint {
|
||||
name: namespace + ".StreamEvents",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.EventStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.StreamEvents,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1101,6 +1179,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
name: namespace + ".GetWeakSubjectivity",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetWeakSubjectivity,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1110,6 +1189,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
name: namespace + ".GetValidatorCount",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetValidatorCount,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1119,6 +1199,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
name: namespace + ".GetValidatorCount",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetValidatorCount,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1129,6 +1210,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetIndividualVotes,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -1138,6 +1220,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
name: namespace + ".GetChainHead",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetChainHead,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1148,6 +1231,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlobs,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -1175,6 +1259,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
name: namespace + ".ListTrustedPeer",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ListTrustedPeer,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1184,6 +1269,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
name: namespace + ".ListTrustedPeer",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.ListTrustedPeer,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1194,6 +1280,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.AddTrustedPeer,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -1204,6 +1291,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.AddTrustedPeer,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -1213,6 +1301,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
name: namespace + ".RemoveTrustedPeer",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.RemoveTrustedPeer,
|
||||
methods: []string{http.MethodDelete},
|
||||
@@ -1222,6 +1311,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
name: namespace + ".RemoveTrustedPeer",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.RemoveTrustedPeer,
|
||||
methods: []string{http.MethodDelete},
|
||||
@@ -1244,6 +1334,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPerformance,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -1254,6 +1345,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetPerformance,
|
||||
methods: []string{http.MethodPost},
|
||||
@@ -1263,6 +1355,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
|
||||
name: namespace + ".GetParticipation",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetParticipation,
|
||||
methods: []string{http.MethodGet},
|
||||
@@ -1272,6 +1365,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
|
||||
name: namespace + ".GetActiveSetChanges",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetActiveSetChanges,
|
||||
methods: []string{http.MethodGet},
|
||||
|
||||
@@ -264,8 +264,6 @@ type WriteOnlyEth1Data interface {
|
||||
AppendEth1DataVotes(val *ethpb.Eth1Data) error
|
||||
SetEth1DepositIndex(val uint64) error
|
||||
ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (primitives.Epoch, error)
|
||||
SetExitBalanceToConsume(val primitives.Gwei) error
|
||||
SetEarliestExitEpoch(val primitives.Epoch) error
|
||||
}
|
||||
|
||||
// WriteOnlyValidators defines a struct which only has write access to validators methods.
|
||||
@@ -333,7 +331,6 @@ type WriteOnlyWithdrawals interface {
|
||||
DequeuePendingPartialWithdrawals(num uint64) error
|
||||
SetNextWithdrawalIndex(i uint64) error
|
||||
SetNextWithdrawalValidatorIndex(i primitives.ValidatorIndex) error
|
||||
SetPendingPartialWithdrawals(val []*ethpb.PendingPartialWithdrawal) error
|
||||
}
|
||||
|
||||
type WriteOnlyConsolidations interface {
|
||||
|
||||
@@ -75,33 +75,3 @@ func (b *BeaconState) ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (prim
|
||||
|
||||
return b.earliestExitEpoch, nil
|
||||
}
|
||||
|
||||
// SetExitBalanceToConsume sets the exit balance to consume. This method mutates the state.
|
||||
func (b *BeaconState) SetExitBalanceToConsume(exitBalanceToConsume primitives.Gwei) error {
|
||||
if b.version < version.Electra {
|
||||
return errNotSupported("SetExitBalanceToConsume", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.exitBalanceToConsume = exitBalanceToConsume
|
||||
b.markFieldAsDirty(types.ExitBalanceToConsume)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetEarliestExitEpoch sets the earliest exit epoch. This method mutates the state.
|
||||
func (b *BeaconState) SetEarliestExitEpoch(earliestExitEpoch primitives.Epoch) error {
|
||||
if b.version < version.Electra {
|
||||
return errNotSupported("SetEarliestExitEpoch", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.earliestExitEpoch = earliestExitEpoch
|
||||
b.markFieldAsDirty(types.EarliestExitEpoch)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -100,22 +100,3 @@ func (b *BeaconState) DequeuePendingPartialWithdrawals(n uint64) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPendingPartialWithdrawals sets the pending partial withdrawals. This method mutates the state.
|
||||
func (b *BeaconState) SetPendingPartialWithdrawals(pendingPartialWithdrawals []*eth.PendingPartialWithdrawal) error {
|
||||
if b.version < version.Electra {
|
||||
return errNotSupported("SetPendingPartialWithdrawals", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if pendingPartialWithdrawals == nil {
|
||||
return errors.New("cannot set nil pending partial withdrawals")
|
||||
}
|
||||
|
||||
b.pendingPartialWithdrawals = pendingPartialWithdrawals
|
||||
b.markFieldAsDirty(types.PendingPartialWithdrawals)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ go_library(
|
||||
"rpc_blob_sidecars_by_range.go",
|
||||
"rpc_blob_sidecars_by_root.go",
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_light_client.go",
|
||||
@@ -172,7 +171,6 @@ go_test(
|
||||
"rpc_beacon_blocks_by_root_test.go",
|
||||
"rpc_blob_sidecars_by_range_test.go",
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_data_column_sidecars_by_range_test.go",
|
||||
"rpc_data_column_sidecars_by_root_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
@@ -284,7 +282,6 @@ go_test(
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -89,13 +89,6 @@ var (
|
||||
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
|
||||
},
|
||||
)
|
||||
rpcDataColumnsByRangeResponseLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "rpc_data_columns_by_range_response_latency_milliseconds",
|
||||
Help: "Captures total time to respond to rpc DataColumnsByRange requests in a milliseconds distribution",
|
||||
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
|
||||
},
|
||||
)
|
||||
arrivalBlockPropagationHistogram = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "block_arrival_latency_milliseconds",
|
||||
|
||||
@@ -47,10 +47,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
 	allowedBlobsPerSecond := float64(flags.Get().BlobBatchLimit)
 	allowedBlobsBurst := int64(flags.Get().BlobBatchLimitBurstFactor * flags.Get().BlobBatchLimit)
 
-	// Initialize data column limits.
-	allowedDataColumnsPerSecond := float64(flags.Get().DataColumnBatchLimit)
-	allowedDataColumnsBurst := int64(flags.Get().DataColumnBatchLimitBurstFactor * flags.Get().DataColumnBatchLimit)
-
 	// Set topic map for all rpc topics.
 	topicMap := make(map[string]*leakybucket.Collector, len(p2p.RPCTopicMappings))
 	// Goodbye Message
@@ -71,9 +67,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
 	// for BlobSidecarsByRoot and BlobSidecarsByRange
 	blobCollector := leakybucket.NewCollector(allowedBlobsPerSecond, allowedBlobsBurst, blockBucketPeriod, false)
 
-	// for DataColumnSidecarsByRoot and DataColumnSidecarsByRange
-	dataColumnSidecars := leakybucket.NewCollector(allowedDataColumnsPerSecond, allowedDataColumnsBurst, blockBucketPeriod, false)
-
 	// BlocksByRoots requests
 	topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
 	topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2
@@ -93,11 +86,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
 	topicMap[addEncoding(p2p.RPCLightClientOptimisticUpdateTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
 	topicMap[addEncoding(p2p.RPCLightClientFinalityUpdateTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
 
-	// DataColumnSidecarsByRootV1
-	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
-	// DataColumnSidecarsByRangeV1
-	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
-
 	// General topic for all rpc requests.
 	topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestNewRateLimiter(t *testing.T) {
|
||||
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 18, "correct number of topics not registered")
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 16, "correct number of topics not registered")
|
||||
}
|
||||
|
||||
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
|
||||
|
||||
@@ -42,15 +42,15 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
|
||||
// Fulu: https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#messages
|
||||
if forkIndex >= version.Fulu {
|
||||
return map[string]rpcHandler{
|
||||
p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
|
||||
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler,
|
||||
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler,
|
||||
p2p.RPCPingTopicV1: s.pingHandler,
|
||||
p2p.RPCMetaDataTopicV3: s.metaDataHandler, // Modified in Fulu
|
||||
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler,
|
||||
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler,
|
||||
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
|
||||
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
|
||||
p2p.RPCStatusTopicV1: s.statusRPCHandler,
|
||||
p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
|
||||
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler,
|
||||
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler,
|
||||
p2p.RPCPingTopicV1: s.pingHandler,
|
||||
p2p.RPCMetaDataTopicV3: s.metaDataHandler, // Modified in Fulu
|
||||
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler,
|
||||
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler,
|
||||
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,218 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// We count a single request as a single rate limiting amount, regardless of the number of columns requested.
|
||||
const rateLimitingAmount = 1
|
||||
|
||||
var notDataColumnsByRangeIdentifiersError = errors.New("not data columns by range identifiers")
|
||||
|
||||
// dataColumnSidecarsByRangeRPCHandler looks up the request data columns from the database from a given start slot index
|
||||
func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.DataColumnSidecarsByRangeHandler")
|
||||
defer span.End()
|
||||
|
||||
// Check if the message type is the one expected.
|
||||
request, ok := msg.(*pb.DataColumnSidecarsByRangeRequest)
|
||||
if !ok {
|
||||
return notDataColumnsByRangeIdentifiersError
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||
defer cancel()
|
||||
|
||||
SetRPCStreamDeadlines(stream)
|
||||
beaconConfig := params.BeaconConfig()
|
||||
maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
|
||||
requestedColumns := request.Columns
|
||||
|
||||
// Format log fields.
|
||||
var requestedColumnsLog interface{} = "all"
|
||||
if uint64(len(requestedColumns)) != beaconConfig.NumberOfColumns {
|
||||
requestedColumnsLog = requestedColumns
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"remotePeer": remotePeer,
|
||||
"requestedColumns": requestedColumnsLog,
|
||||
"startSlot": request.StartSlot,
|
||||
"count": request.Count,
|
||||
})
|
||||
|
||||
// Validate the request regarding rate limiting.
|
||||
if err := s.rateLimiter.validateRequest(stream, rateLimitingAmount); err != nil {
|
||||
return errors.Wrap(err, "rate limiter validate request")
|
||||
}
|
||||
|
||||
// Validate the request regarding its parameters.
|
||||
rangeParameters, err := validateDataColumnsByRange(request, s.cfg.chain.CurrentSlot())
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
|
||||
tracing.AnnotateError(span, err)
|
||||
return errors.Wrap(err, "validate data columns by range")
|
||||
}
|
||||
if rangeParameters == nil {
|
||||
log.Debug("No data columns by range to serve")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debug("Serving data columns by range request")
|
||||
|
||||
// Ticker to stagger out large requests.
|
||||
ticker := time.NewTicker(time.Second)
|
||||
|
||||
batcher, err := newBlockRangeBatcher(*rangeParameters, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return errors.Wrap(err, "new block range batcher")
|
||||
}
|
||||
|
||||
// Derive the wanted columns for the request.
|
||||
wantedColumns := make([]uint64, len(request.Columns))
|
||||
copy(wantedColumns, request.Columns)
|
||||
|
||||
// Sort the wanted columns.
|
||||
slices.Sort(wantedColumns)
|
||||
|
||||
var batch blockBatch
|
||||
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
|
||||
batchStart := time.Now()
|
||||
maxRequestDataColumnSidecars, err = s.streamDataColumnBatch(ctx, batch, maxRequestDataColumnSidecars, wantedColumns, stream)
|
||||
rpcDataColumnsByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Once the quota is reached, we're done serving the request.
|
||||
if maxRequestDataColumnSidecars == 0 {
|
||||
log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Debug("Reached quota for data column sidecars by range request")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := batch.error(); err != nil {
|
||||
log.WithError(err).Debug("error in DataColumnSidecarsByRange batch")
|
||||
|
||||
// If we hit a rate limit, the error response has already been written, and the stream is already closed.
|
||||
if !errors.Is(err, p2ptypes.ErrRateLimited) {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
}
|
||||
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
closeStream(stream, log)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) streamDataColumnBatch(ctx context.Context, batch blockBatch, quota uint64, wantedDataColumnIndices []uint64, stream libp2pcore.Stream) (uint64, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.streamDataColumnBatch")
|
||||
defer span.End()
|
||||
|
||||
// Defensive check to guard against underflow.
|
||||
if quota == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Loop over the blocks in the batch.
|
||||
for _, block := range batch.canonical() {
|
||||
// Get the block blockRoot.
|
||||
blockRoot := block.Root()
|
||||
|
||||
// Retrieve the data column sidecars from the store.
|
||||
verifiedRODataColumns, err := s.cfg.dataColumnStorage.Get(blockRoot, wantedDataColumnIndices)
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
return quota, errors.Wrapf(err, "get data column sidecars: block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Write the retrieved sidecars to the stream.
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
sidecar := verifiedRODataColumn.DataColumnSidecar
|
||||
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||
|
||||
if err := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sidecar); err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return quota, errors.Wrap(err, "write data column sidecar chunk")
|
||||
}
|
||||
|
||||
s.rateLimiter.add(stream, rateLimitingAmount)
|
||||
quota -= 1
|
||||
|
||||
// Stop streaming results once the quota of writes for the request is consumed.
|
||||
if quota == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return quota, nil
|
||||
}
|
||||
|
||||
func validateDataColumnsByRange(request *pb.DataColumnSidecarsByRangeRequest, currentSlot primitives.Slot) (*rangeParams, error) {
|
||||
startSlot, count := request.StartSlot, request.Count
|
||||
|
||||
if count == 0 {
|
||||
return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "invalid request count parameter")
|
||||
}
|
||||
|
||||
endSlot, err := request.StartSlot.SafeAdd(count - 1)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "overflow start + count -1")
|
||||
}
|
||||
|
||||
// Peers may overshoot the current slot when in initial sync,
|
||||
// so we don't want to penalize them by treating the request as an error.
|
||||
if startSlot > currentSlot {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the oldest slot we'll allow a peer to request, based on the current slot.
|
||||
minStartSlot, err := dataColumnsRPCMinValidSlot(currentSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "data columns RPC min valid slot")
|
||||
}
|
||||
|
||||
// Return early if there is nothing to serve.
|
||||
if endSlot < minStartSlot {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Do not serve sidecars for slots before the minimum valid slot or after the current slot.
|
||||
startSlot = max(startSlot, minStartSlot)
|
||||
endSlot = min(endSlot, currentSlot)
|
||||
|
||||
sizeMinusOne, err := endSlot.SafeSub(uint64(startSlot))
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("overflow end - start: %d - %d - should never happen", endSlot, startSlot)
|
||||
}
|
||||
|
||||
size, err := sizeMinusOne.SafeAdd(1)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "overflow end - start + 1")
|
||||
}
|
||||
|
||||
rangeParameters := &rangeParams{start: startSlot, end: endSlot, size: uint64(size)}
|
||||
return rangeParameters, nil
|
||||
}
|
||||
@@ -1,301 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
t.Run("wrong message type", func(t *testing.T) {
|
||||
service := &Service{}
|
||||
err := service.dataColumnSidecarsByRangeRPCHandler(ctx, nil, nil)
|
||||
require.ErrorIs(t, err, notDataColumnsByRangeIdentifiersError)
|
||||
})
|
||||
|
||||
t.Run("invalid request", func(t *testing.T) {
|
||||
slot := primitives.Slot(400)
|
||||
|
||||
localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
chain: &chainMock.ChainService{
|
||||
Slot: &slot,
|
||||
},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
protocolID := protocol.ID(fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, _, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, responseCodeInvalidRequest, code)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := &pb.DataColumnSidecarsByRangeRequest{
|
||||
Count: 0, // Invalid count
|
||||
}
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
err = service.dataColumnSidecarsByRangeRPCHandler(ctx, msg, stream)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
beaconConfig := params.BeaconConfig()
|
||||
beaconConfig.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(beaconConfig)
|
||||
|
||||
slot := primitives.Slot(400)
|
||||
|
||||
params := []util.DataColumnParam{
|
||||
{Slot: 10, Index: 1}, {Slot: 10, Index: 2}, {Slot: 10, Index: 3},
|
||||
{Slot: 40, Index: 4}, {Slot: 40, Index: 6},
|
||||
{Slot: 45, Index: 7}, {Slot: 45, Index: 8}, {Slot: 45, Index: 9},
|
||||
}
|
||||
|
||||
_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
err := storage.Save(verifiedRODataColumns)
|
||||
require.NoError(t, err)
|
||||
|
||||
localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
protocolID := protocol.ID(fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1))
|
||||
|
||||
roots := [][fieldparams.RootLength]byte{
|
||||
verifiedRODataColumns[0].BlockRoot(),
|
||||
verifiedRODataColumns[3].BlockRoot(),
|
||||
verifiedRODataColumns[5].BlockRoot(),
|
||||
}
|
||||
|
||||
slots := []primitives.Slot{
|
||||
verifiedRODataColumns[0].Slot(),
|
||||
verifiedRODataColumns[3].Slot(),
|
||||
verifiedRODataColumns[5].Slot(),
|
||||
}
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
roBlocks := make([]blocks.ROBlock, 0, len(roots))
|
||||
for i := range 3 {
|
||||
signedBeaconBlockPb := util.NewBeaconBlock()
|
||||
signedBeaconBlockPb.Block.Slot = slots[i]
|
||||
if i != 0 {
|
||||
signedBeaconBlockPb.Block.ParentRoot = roots[i-1][:]
|
||||
}
|
||||
|
||||
signedBeaconBlock, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// There is a discrepancy between the beacon block root and the RO data column root,
|
||||
// but for the purposes of this test we don't care.
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signedBeaconBlock, roots[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlocks = append(roBlocks, roblock)
|
||||
}
|
||||
|
||||
err = beaconDB.SaveROBlocks(ctx, roBlocks, false /*cache*/)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
chain: &chainMock.ChainService{
|
||||
Slot: &slot,
|
||||
},
|
||||
dataColumnStorage: storage,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
ctxMap := ContextByteVersions{
|
||||
[4]byte{245, 165, 253, 66}: version.Fulu,
|
||||
}
|
||||
|
||||
root0 := verifiedRODataColumns[0].BlockRoot()
|
||||
root3 := verifiedRODataColumns[3].BlockRoot()
|
||||
root5 := verifiedRODataColumns[5].BlockRoot()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
sidecars := make([]*blocks.RODataColumn, 0, 5)
|
||||
|
||||
for i := uint64(0); ; /* no stop condition */ i++ {
|
||||
sidecar, err := readChunkedDataColumnSidecar(stream, remoteP2P, ctxMap)
|
||||
if errors.Is(err, io.EOF) {
|
||||
// End of stream.
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
require.Equal(t, 8, len(sidecars))
|
||||
require.Equal(t, root0, sidecars[0].BlockRoot())
|
||||
require.Equal(t, root0, sidecars[1].BlockRoot())
|
||||
require.Equal(t, root0, sidecars[2].BlockRoot())
|
||||
require.Equal(t, root3, sidecars[3].BlockRoot())
|
||||
require.Equal(t, root3, sidecars[4].BlockRoot())
|
||||
require.Equal(t, root5, sidecars[5].BlockRoot())
|
||||
require.Equal(t, root5, sidecars[6].BlockRoot())
|
||||
require.Equal(t, root5, sidecars[7].BlockRoot())
|
||||
|
||||
require.Equal(t, uint64(1), sidecars[0].Index)
|
||||
require.Equal(t, uint64(2), sidecars[1].Index)
|
||||
require.Equal(t, uint64(3), sidecars[2].Index)
|
||||
require.Equal(t, uint64(4), sidecars[3].Index)
|
||||
require.Equal(t, uint64(6), sidecars[4].Index)
|
||||
require.Equal(t, uint64(7), sidecars[5].Index)
|
||||
require.Equal(t, uint64(8), sidecars[6].Index)
|
||||
require.Equal(t, uint64(9), sidecars[7].Index)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := &pb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 5,
|
||||
Count: 50,
|
||||
Columns: []uint64{1, 2, 3, 4, 6, 7, 8, 9, 10},
|
||||
}
|
||||
|
||||
err = service.dataColumnSidecarsByRangeRPCHandler(ctx, msg, stream)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestValidateDataColumnsByRange(t *testing.T) {
|
||||
maxUint := primitives.Slot(math.MaxUint64)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.FuluForkEpoch = 10
|
||||
config.MinEpochsForDataColumnSidecarsRequest = 4096
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
startSlot primitives.Slot
|
||||
count uint64
|
||||
currentSlot primitives.Slot
|
||||
expected *rangeParams
|
||||
expectErr bool
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "zero count returns error",
|
||||
count: 0,
|
||||
expectErr: true,
|
||||
errContains: "invalid request count parameter",
|
||||
},
|
||||
{
|
||||
name: "overflow in addition returns error",
|
||||
startSlot: maxUint - 5,
|
||||
count: 10,
|
||||
currentSlot: maxUint,
|
||||
expectErr: true,
|
||||
errContains: "overflow start + count -1",
|
||||
},
|
||||
{
|
||||
name: "start greater than current returns nil",
|
||||
startSlot: 150,
|
||||
count: 10,
|
||||
currentSlot: 100,
|
||||
expected: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "end slot greater than min start slot returns nil",
|
||||
startSlot: 150,
|
||||
count: 100,
|
||||
currentSlot: 300,
|
||||
expected: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "range within limits",
|
||||
startSlot: 350,
|
||||
count: 10,
|
||||
currentSlot: 400,
|
||||
expected: &rangeParams{start: 350, end: 359, size: 10},
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "range exceeds limits",
|
||||
startSlot: 0,
|
||||
count: 10_000,
|
||||
currentSlot: 400,
|
||||
expected: &rangeParams{start: 320, end: 400, size: 81},
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
request := &pb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: tc.startSlot,
|
||||
Count: tc.count,
|
||||
}
|
||||
|
||||
rangeParameters, err := validateDataColumnsByRange(request, tc.currentSlot)
|
||||
if tc.expectErr {
|
||||
require.ErrorContains(t, err, tc.errContains)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, rangeParameters)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -170,10 +170,5 @@ func dataColumnsRPCMinValidSlot(currentSlot primitives.Slot) (primitives.Slot, e
|
||||
minStartEpoch = currEpoch - minReqEpochs
|
||||
}
|
||||
|
||||
epochStart, err := slots.EpochStart(minStartEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "epoch start for epoch %d", minStartEpoch)
|
||||
}
|
||||
|
||||
return epochStart, nil
|
||||
return slots.EpochStart(minStartEpoch)
|
||||
}
|
||||
|
||||
@@ -139,7 +139,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
|
||||
sidecars := make([]*blocks.RODataColumn, 0, 5)
|
||||
|
||||
for i := uint64(0); ; /* no stop condition */ i++ {
|
||||
sidecar, err := readChunkedDataColumnSidecar(stream, remoteP2P, ctxMap)
|
||||
sidecar, err := readChunkedDataColumnSideCar(stream, remoteP2P, ctxMap)
|
||||
if errors.Is(err, io.EOF) {
|
||||
// End of stream.
|
||||
break
|
||||
@@ -276,28 +276,21 @@ func TestDataColumnsRPCMinValidSlot(t *testing.T) {
|
||||
expected: primitives.Slot(math.MaxUint64),
|
||||
},
|
||||
{
|
||||
name: "Current epoch is before fulu fork epoch",
|
||||
fuluForkEpoch: 10,
|
||||
minReqEpochs: 5,
|
||||
currentSlot: primitives.Slot(8 * slotsPerEpoch),
|
||||
expected: primitives.Slot(10 * slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "Current epoch is fulu fork epoch",
|
||||
name: "Current epoch equals fork epoch",
|
||||
fuluForkEpoch: 10,
|
||||
minReqEpochs: 5,
|
||||
currentSlot: primitives.Slot(10 * slotsPerEpoch),
|
||||
expected: primitives.Slot(10 * slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "Current epoch between fulu fork epoch and minReqEpochs",
|
||||
name: "Current epoch less than minReqEpochs",
|
||||
fuluForkEpoch: 10,
|
||||
minReqEpochs: 20,
|
||||
currentSlot: primitives.Slot(15 * slotsPerEpoch),
|
||||
expected: primitives.Slot(10 * slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "Current epoch after fulu fork epoch + minReqEpochs",
|
||||
name: "Current epoch greater than minReqEpochs + fork epoch",
|
||||
fuluForkEpoch: 10,
|
||||
minReqEpochs: 5,
|
||||
currentSlot: primitives.Slot(20 * slotsPerEpoch),
|
||||
|
||||
@@ -42,10 +42,6 @@ var (
|
||||
errDataColumnChunkedReadFailure = errors.New("failed to read stream of chunk-encoded data columns")
|
||||
)
|
||||
|
||||
// ------
|
||||
// Blocks
|
||||
// ------
|
||||
|
||||
// BeaconBlockProcessor defines a block processing function, which allows to start utilizing
|
||||
// blocks even before all blocks are ready.
|
||||
type BeaconBlockProcessor func(block interfaces.ReadOnlySignedBeaconBlock) error
|
||||
@@ -159,14 +155,6 @@ func SendBeaconBlocksByRootRequest(
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// -------------
|
||||
// Blob sidecars
|
||||
// -------------
|
||||
|
||||
// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob sidecar
|
||||
// that was received from a peer in response to an rpc request.
|
||||
type BlobResponseValidation func(blocks.ROBlob) error
|
||||
|
||||
func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *ethpb.BlobSidecarsByRangeRequest, bvs ...BlobResponseValidation) ([]blocks.ROBlob, error) {
|
||||
topic, err := p2p.TopicFromMessage(p2p.BlobSidecarsByRangeName, slots.ToEpoch(tor.CurrentSlot()))
|
||||
if err != nil {
|
||||
@@ -228,6 +216,10 @@ func SendBlobSidecarByRoot(
|
||||
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), max)
|
||||
}
|
||||
|
||||
// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob
|
||||
// that was received from a peer in response to an rpc request.
|
||||
type BlobResponseValidation func(blocks.ROBlob) error
|
||||
|
||||
func composeBlobValidations(vf ...BlobResponseValidation) BlobResponseValidation {
|
||||
return func(blob blocks.ROBlob) error {
|
||||
for i := range vf {
|
||||
@@ -393,19 +385,10 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
|
||||
return rob, nil
|
||||
}
|
||||
|
||||
// --------------------
|
||||
// Data column sidecars
|
||||
// --------------------
|
||||
|
||||
// DataColumnResponseValidation represents a function that can validate aspects of a single unmarshaled data column sidecar
|
||||
// that was received from a peer in response to an rpc request.
|
||||
type DataColumnResponseValidation func(column blocks.RODataColumn) error
|
||||
|
||||
func readChunkedDataColumnSidecar(
|
||||
func readChunkedDataColumnSideCar(
|
||||
stream network.Stream,
|
||||
p2pApi p2p.P2P,
|
||||
ctxMap ContextByteVersions,
|
||||
validationFunctions ...DataColumnResponseValidation,
|
||||
) (*blocks.RODataColumn, error) {
|
||||
// Read the status code from the stream.
|
||||
statusCode, errMessage, err := ReadStatusCode(stream, p2pApi.Encoding())
|
||||
@@ -449,12 +432,5 @@ func readChunkedDataColumnSidecar(
|
||||
return nil, errors.Wrap(err, "new read only data column")
|
||||
}
|
||||
|
||||
// Run validation functions.
|
||||
for _, validationFunction := range validationFunctions {
|
||||
if err := validationFunction(roDataColumn); err != nil {
|
||||
return nil, errors.Wrap(err, "validation function")
|
||||
}
|
||||
}
|
||||
|
||||
return &roDataColumn, nil
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -21,7 +20,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -884,207 +882,3 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
|
||||
func TestErrInvalidFetchedDataDistinction(t *testing.T) {
|
||||
require.Equal(t, false, errors.Is(ErrInvalidFetchedData, verification.ErrBlobInvalid))
|
||||
}
|
||||
|
||||
func TestReadChunkedDataColumnSidecar(t *testing.T) {
|
||||
t.Run("non nil status code", func(t *testing.T) {
|
||||
const reason = "a dummy reason"
|
||||
|
||||
p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
_, err := readChunkedDataColumnSidecar(stream, p2, nil)
|
||||
require.ErrorContains(t, reason, err)
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
require.NoError(t, err)
|
||||
|
||||
writeErrorResponseToStream(responseCodeInvalidRequest, reason, stream, p1)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("unrecognized fork digest", func(t *testing.T) {
|
||||
p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
_, err := readChunkedDataColumnSidecar(stream, p2, ContextByteVersions{})
|
||||
require.ErrorContains(t, "unrecognized fork digest", err)
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = stream.Write([]byte{responseCodeSuccess})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = writeContextToStream([]byte{42, 42, 42, 42}, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("before fulu", func(t *testing.T) {
|
||||
p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
_, err := readChunkedDataColumnSidecar(stream, p2, ContextByteVersions{[4]byte{1, 2, 3, 4}: version.Phase0})
|
||||
require.ErrorContains(t, "unexpected context bytes", err)
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = stream.Write([]byte{responseCodeSuccess})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = writeContextToStream([]byte{1, 2, 3, 4}, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("one validation failed", func(t *testing.T) {
|
||||
const reason = "a dummy reason"
|
||||
|
||||
p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
validationOne := func(column blocks.RODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
validationTwo := func(column blocks.RODataColumn) error {
|
||||
return errors.New(reason)
|
||||
}
|
||||
|
||||
_, err := readChunkedDataColumnSidecar(
|
||||
stream,
|
||||
p2,
|
||||
ContextByteVersions{[4]byte{1, 2, 3, 4}: version.Fulu},
|
||||
validationOne, // OK
|
||||
validationTwo, // Fail
|
||||
)
|
||||
|
||||
require.ErrorContains(t, reason, err)
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
require.NoError(t, err)
|
||||
|
||||
const count = 4
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
// Success response code.
|
||||
_, err = stream.Write([]byte{responseCodeSuccess})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fork digest.
|
||||
err = writeContextToStream([]byte{1, 2, 3, 4}, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sidecar.
|
||||
_, err = p1.Encoding().EncodeWithMaxLength(stream, ðpb.DataColumnSidecar{
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Minute) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
const count = 4
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
expected := ðpb.DataColumnSidecar{
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
actual, err := readChunkedDataColumnSidecar(stream, p2, ContextByteVersions{[4]byte{1, 2, 3, 4}: version.Fulu})
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expected, actual.DataColumnSidecar)
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Success response code.
|
||||
_, err = stream.Write([]byte{responseCodeSuccess})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fork digest.
|
||||
err = writeContextToStream([]byte{1, 2, 3, 4}, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sidecar.
|
||||
_, err = p1.Encoding().EncodeWithMaxLength(stream, expected)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Minute) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -15,12 +15,10 @@ func TestMain(m *testing.M) {
|
||||
|
||||
resetFlags := flags.Get()
|
||||
flags.Init(&flags.GlobalFlags{
|
||||
BlockBatchLimit: 64,
|
||||
BlockBatchLimitBurstFactor: 10,
|
||||
BlobBatchLimit: 32,
|
||||
BlobBatchLimitBurstFactor: 2,
|
||||
DataColumnBatchLimit: 4096,
|
||||
DataColumnBatchLimitBurstFactor: 2,
|
||||
BlockBatchLimit: 64,
|
||||
BlockBatchLimitBurstFactor: 10,
|
||||
BlobBatchLimit: 32,
|
||||
BlobBatchLimitBurstFactor: 2,
|
||||
})
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
|
||||
@@ -30,16 +30,16 @@ var (
|
||||
ErrBlobIndexInvalid = errors.Join(ErrBlobInvalid, errors.New("incorrect blob sidecar index"))
|
||||
|
||||
// errFromFutureSlot means RequireSlotNotTooEarly failed.
|
||||
errFromFutureSlot = errors.New("slot is too far in the future")
|
||||
errFromFutureSlot = errors.Join(ErrBlobInvalid, errors.New("slot is too far in the future"))
|
||||
|
||||
// errSlotNotAfterFinalized means RequireSlotAboveFinalized failed.
|
||||
errSlotNotAfterFinalized = errors.New("slot <= finalized checkpoint")
|
||||
errSlotNotAfterFinalized = errors.Join(ErrBlobInvalid, errors.New("slot <= finalized checkpoint"))
|
||||
|
||||
// ErrInvalidProposerSignature means RequireValidProposerSignature failed.
|
||||
ErrInvalidProposerSignature = errors.Join(ErrBlobInvalid, errors.New("proposer signature could not be verified"))
|
||||
|
||||
// errSidecarParentNotSeen means RequireSidecarParentSeen failed.
|
||||
errSidecarParentNotSeen = errors.New("parent root has not been seen")
|
||||
errSidecarParentNotSeen = errors.Join(ErrBlobInvalid, errors.New("parent root has not been seen"))
|
||||
|
||||
// errSidecarParentInvalid means RequireSidecarParentValid failed.
|
||||
errSidecarParentInvalid = errors.Join(ErrBlobInvalid, errors.New("parent block is not valid"))
|
||||
|
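For context, a minimal, self-contained sketch of what joining these sentinels with `errors.Join` (as in the hunk above) buys callers; the variable names mirror the hunk, but the definitions here are illustrative stand-ins, not the verification package itself:

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the sentinels touched above.
var (
	ErrBlobInvalid    = errors.New("blob invalid")
	errFromFutureSlot = errors.Join(ErrBlobInvalid, errors.New("slot is too far in the future"))
)

func main() {
	// Because the specific error now joins ErrBlobInvalid, callers can match
	// either the broad class or the specific cause with errors.Is.
	fmt.Println(errors.Is(errFromFutureSlot, ErrBlobInvalid)) // true
}
```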
||||
@@ -1,7 +0,0 @@
|
||||
### Added
|
||||
|
||||
- New ssz-only flag for the validator client to enable calling REST APIs with SSZ, starting with the get block endpoint.
|
||||
|
||||
### Changed
|
||||
|
||||
- When the REST API is enabled, the get block API defaults to requesting and receiving SSZ instead of JSON, with JSON as the fallback.
|
||||
@@ -1,2 +0,0 @@
|
||||
### Added
|
||||
- Implement `dataColumnSidecarsByRangeRPCHandler`.
|
||||
@@ -1,9 +0,0 @@
|
||||
### Changed
|
||||
- In `TopicFromMessage`: No longer assume that all Fulu-specific topics are V3 only.
|
||||
- `readChunkedDataColumnSidecar`: Add `validationFunctions` parameter and add tests.
|
||||
|
||||
### Added
|
||||
- New `StatusV2` proto message.
|
||||
|
||||
### Removed
|
||||
- Unused `DataColumnIdentifier` proto message.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- Add native state diff type and marshalling functions.
|
||||
changelog/rose2221-develop.md (new file)
@@ -0,0 +1,5 @@
|
||||
### Added
|
||||
|
||||
- **Gzip Compression for Beacon API:**
|
||||
Fixed an issue where the beacon chain server ignored the `Accept-Encoding: gzip` header and returned uncompressed JSON responses. With this change, endpoints that use the `AcceptHeaderHandler` now also compress responses when a client requests gzip encoding.
|
||||
Fixes [#14593](https://github.com/prysmaticlabs/prysm/issues/14593).
|
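As a usage note for the entry above: a minimal client-side sketch, assuming a locally running beacon node; the port (3500) and the `/eth/v1/node/version` path are illustrative defaults, not part of this change:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:3500/eth/v1/node/version", nil)
	if err != nil {
		panic(err)
	}
	// Setting Accept-Encoding manually disables Go's transparent decompression,
	// so the gzip body is decoded explicitly below.
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var body io.Reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(resp.Body)
		if err != nil {
			panic(err)
		}
		defer gz.Close()
		body = gz
	}

	out, err := io.ReadAll(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```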
||||
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- Remove "invalid" from logs for incoming blob sidecar that is missing parent or out of range slot
|
||||
@@ -212,18 +212,11 @@ var (
|
||||
Usage: "The factor by which blob batch limit may increase on burst.",
|
||||
Value: 3,
|
||||
}
|
||||
// DataColumnBatchLimit specifies the requested data column batch size.
|
||||
DataColumnBatchLimit = &cli.IntFlag{
|
||||
Name: "data-column-batch-limit",
|
||||
Usage: "The amount of data columns the local peer is bounded to request and respond to in a batch.",
|
||||
Value: 4096,
|
||||
}
|
||||
// DataColumnBatchLimitBurstFactor specifies the factor by which data column batch size may increase.
|
||||
DataColumnBatchLimitBurstFactor = &cli.IntFlag{
|
||||
Name: "data-column-batch-limit-burst-factor",
|
||||
Usage: "The factor by which data column batch limit may increase on burst.",
|
||||
Value: 2,
|
||||
}
|
||||
// DisableDebugRPCEndpoints disables the debug Beacon API namespace.
|
||||
DisableDebugRPCEndpoints = &cli.BoolFlag{
|
||||
Name: "disable-debug-rpc-endpoints",
|
||||
|
||||
@@ -8,17 +8,16 @@ import (
|
||||
// GlobalFlags specifies all the global flags for the
|
||||
// beacon node.
|
||||
type GlobalFlags struct {
|
||||
SubscribeToAllSubnets bool
|
||||
SubscribeAllDataSubnets bool
|
||||
MinimumSyncPeers int
|
||||
MinimumPeersPerSubnet int
|
||||
MaxConcurrentDials int
|
||||
BlockBatchLimit int
|
||||
BlockBatchLimitBurstFactor int
|
||||
BlobBatchLimit int
|
||||
BlobBatchLimitBurstFactor int
|
||||
DataColumnBatchLimit int
|
||||
DataColumnBatchLimitBurstFactor int
|
||||
SubscribeToAllSubnets bool
|
||||
SubscribeAllDataSubnets bool
|
||||
MinimumSyncPeers int
|
||||
MinimumPeersPerSubnet int
|
||||
MaxConcurrentDials int
|
||||
BlockBatchLimit int
|
||||
BlockBatchLimitBurstFactor int
|
||||
BlobBatchLimit int
|
||||
DataColumnBatchLimit int
|
||||
BlobBatchLimitBurstFactor int
|
||||
}
|
||||
|
||||
var globalConfig *GlobalFlags
|
||||
@@ -56,10 +55,8 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
|
||||
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
|
||||
cfg.BlobBatchLimitBurstFactor = ctx.Int(BlobBatchLimitBurstFactor.Name)
|
||||
cfg.DataColumnBatchLimit = ctx.Int(DataColumnBatchLimit.Name)
|
||||
cfg.DataColumnBatchLimitBurstFactor = ctx.Int(DataColumnBatchLimitBurstFactor.Name)
|
||||
cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
|
||||
cfg.MaxConcurrentDials = ctx.Int(MaxConcurrentDials.Name)
|
||||
|
||||
configureMinimumPeers(ctx, cfg)
|
||||
|
||||
Init(cfg)
|
||||
|
||||
@@ -52,8 +52,6 @@ type Flags struct {
|
||||
EnableExperimentalAttestationPool bool // EnableExperimentalAttestationPool enables an experimental attestation pool design.
|
||||
EnableDutiesV2 bool // EnableDutiesV2 sets validator client to use the get Duties V2 endpoint
|
||||
EnableWeb bool // EnableWeb enables the webui on the validator client
|
||||
SSZOnly bool // SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled (useful for debugging)
|
||||
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
|
||||
// Logging related toggles.
|
||||
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
|
||||
EnableFullSSZDataLogging bool // Enables logging for full ssz data on rejected gossip messages
|
||||
@@ -278,10 +276,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(enableExperimentalAttestationPool)
|
||||
cfg.EnableExperimentalAttestationPool = true
|
||||
}
|
||||
if ctx.IsSet(enableStateDiff.Name) {
|
||||
logEnabled(enableStateDiff)
|
||||
cfg.EnableStateDiff = true
|
||||
}
|
||||
if ctx.IsSet(forceHeadFlag.Name) {
|
||||
logEnabled(forceHeadFlag)
|
||||
cfg.ForceHead = ctx.String(forceHeadFlag.Name)
|
||||
@@ -350,11 +344,6 @@ func ConfigureValidator(ctx *cli.Context) error {
|
||||
logEnabled(EnableWebFlag)
|
||||
cfg.EnableWeb = true
|
||||
}
|
||||
if ctx.Bool(SSZOnly.Name) {
|
||||
logEnabled(SSZOnly)
|
||||
cfg.SSZOnly = true
|
||||
}
|
||||
|
||||
cfg.KeystoreImportDebounceInterval = ctx.Duration(dynamicKeyReloadDebounceInterval.Name)
|
||||
Init(cfg)
|
||||
return nil
|
||||
|
||||
@@ -176,10 +176,6 @@ var (
|
||||
Name: "enable-experimental-attestation-pool",
|
||||
Usage: "Enables an experimental attestation pool design.",
|
||||
}
|
||||
enableStateDiff = &cli.BoolFlag{
|
||||
Name: "enable-state-diff",
|
||||
Usage: "Enables the experimental state diff feature.",
|
||||
}
|
||||
// forceHeadFlag is a flag to force the head of the beacon chain to a specific block.
|
||||
forceHeadFlag = &cli.StringFlag{
|
||||
Name: "sync-from",
|
||||
@@ -205,12 +201,6 @@ var (
|
||||
Usage: "(Work in progress): Enables the web portal for the validator client.",
|
||||
Value: false,
|
||||
}
|
||||
|
||||
// SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled
|
||||
SSZOnly = &cli.BoolFlag{
|
||||
Name: "ssz-only",
|
||||
Usage: "(debug): Forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled",
|
||||
}
|
||||
)
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
@@ -233,7 +223,6 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
|
||||
EnableBeaconRESTApi,
|
||||
EnableDutiesV2,
|
||||
EnableWebFlag,
|
||||
SSZOnly,
|
||||
}...)
|
||||
|
||||
// E2EValidatorFlags contains a list of the validator feature flags to be tested in E2E.
|
||||
@@ -271,7 +260,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
|
||||
DisableQUIC,
|
||||
EnableDiscoveryReboot,
|
||||
enableExperimentalAttestationPool,
|
||||
enableStateDiff,
|
||||
forceHeadFlag,
|
||||
blacklistRoots,
|
||||
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
|
||||
|
||||
@@ -14,7 +14,6 @@ go_library(
|
||||
"mainnet_config.go",
|
||||
"minimal_config.go",
|
||||
"network_config.go",
|
||||
"state_diff_config.go",
|
||||
"testnet_e2e_config.go",
|
||||
"testnet_holesky_config.go",
|
||||
"testnet_hoodi_config.go",
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
package params
|
||||
|
||||
var (
|
||||
stateHierarchyExponents = []uint64{21, 18, 16, 13, 11, 9, 5}
|
||||
)
|
||||
|
||||
func StateHierarchyExponents() []uint64 {
|
||||
return stateHierarchyExponents
|
||||
}
|
||||
@@ -40,12 +40,6 @@ func NewWrappedExecutionData(v proto.Message) (interfaces.ExecutionData, error)
|
||||
case *enginev1.ExecutionBundleElectra:
|
||||
// note: no payload changes in electra so using deneb
|
||||
return WrappedExecutionPayloadDeneb(pbStruct.Payload)
|
||||
case *enginev1.ExecutionPayloadHeader:
|
||||
return WrappedExecutionPayloadHeader(pbStruct)
|
||||
case *enginev1.ExecutionPayloadHeaderCapella:
|
||||
return WrappedExecutionPayloadHeaderCapella(pbStruct)
|
||||
case *enginev1.ExecutionPayloadHeaderDeneb:
|
||||
return WrappedExecutionPayloadHeaderDeneb(pbStruct)
|
||||
default:
|
||||
return nil, ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["state_diff.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/consensus-types/hdiff",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/capella:go_default_library",
|
||||
"//beacon-chain/core/deneb:go_default_library",
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/helpers:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"fuzz_test.go",
|
||||
"property_test.go",
|
||||
"security_test.go",
|
||||
"state_diff_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,491 +0,0 @@
|
||||
package hdiff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
// FuzzNewHdiff tests parsing variations of realistic diffs
|
||||
func FuzzNewHdiff(f *testing.F) {
|
||||
// Add seed corpus with various valid diffs from realistic scenarios
|
||||
sizes := []uint64{8, 16, 32}
|
||||
for _, size := range sizes {
|
||||
source, _ := util.DeterministicGenesisStateElectra(f, size)
|
||||
|
||||
// Create various realistic target states
|
||||
scenarios := []string{"slot_change", "balance_change", "validator_change", "multiple_changes"}
|
||||
for _, scenario := range scenarios {
|
||||
target := source.Copy()
|
||||
|
||||
switch scenario {
|
||||
case "slot_change":
|
||||
_ = target.SetSlot(source.Slot() + 1)
|
||||
case "balance_change":
|
||||
balances := target.Balances()
|
||||
if len(balances) > 0 {
|
||||
balances[0] += 1000000000
|
||||
_ = target.SetBalances(balances)
|
||||
}
|
||||
case "validator_change":
|
||||
validators := target.Validators()
|
||||
if len(validators) > 0 {
|
||||
validators[0].EffectiveBalance += 1000000000
|
||||
_ = target.SetValidators(validators)
|
||||
}
|
||||
case "multiple_changes":
|
||||
_ = target.SetSlot(source.Slot() + 5)
|
||||
balances := target.Balances()
|
||||
validators := target.Validators()
|
||||
if len(balances) > 0 && len(validators) > 0 {
|
||||
balances[0] += 2000000000
|
||||
validators[0].EffectiveBalance += 1000000000
|
||||
_ = target.SetBalances(balances)
|
||||
_ = target.SetValidators(validators)
|
||||
}
|
||||
}
|
||||
|
||||
validDiff, err := Diff(source, target)
|
||||
if err == nil {
|
||||
f.Add(validDiff.StateDiff, validDiff.ValidatorDiffs, validDiff.BalancesDiff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f.Fuzz(func(t *testing.T, stateDiff, validatorDiffs, balancesDiff []byte) {
|
||||
// Limit input sizes to reasonable bounds
|
||||
if len(stateDiff) > 5000 || len(validatorDiffs) > 5000 || len(balancesDiff) > 5000 {
|
||||
return
|
||||
}
|
||||
|
||||
input := HdiffBytes{
|
||||
StateDiff: stateDiff,
|
||||
ValidatorDiffs: validatorDiffs,
|
||||
BalancesDiff: balancesDiff,
|
||||
}
|
||||
|
||||
// Test parsing - should not panic even with corrupted but bounded data
|
||||
_, err := newHdiff(input)
|
||||
_ = err // Expected to fail with corrupted data
|
||||
})
|
||||
}
|
||||
|
||||
// FuzzNewStateDiff tests the newStateDiff function with random compressed input
|
||||
func FuzzNewStateDiff(f *testing.F) {
|
||||
// Add seed corpus
|
||||
source, _ := util.DeterministicGenesisStateElectra(f, 16)
|
||||
target := source.Copy()
|
||||
_ = target.SetSlot(source.Slot() + 5)
|
||||
|
||||
diff, err := diffToState(source, target)
|
||||
if err == nil {
|
||||
serialized := diff.serialize()
|
||||
f.Add(serialized)
|
||||
}
|
||||
|
||||
// Add edge cases
|
||||
f.Add([]byte{})
|
||||
f.Add([]byte{0x01})
|
||||
f.Add([]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07})
|
||||
|
||||
f.Fuzz(func(t *testing.T, data []byte) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
t.Errorf("newStateDiff panicked: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
// Should never panic, only return error
|
||||
_, err := newStateDiff(data)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
|
||||
// FuzzNewValidatorDiffs tests validator diff deserialization
|
||||
func FuzzNewValidatorDiffs(f *testing.F) {
|
||||
// Add seed corpus
|
||||
source, _ := util.DeterministicGenesisStateElectra(f, 8)
|
||||
target := source.Copy()
|
||||
vals := target.Validators()
|
||||
if len(vals) > 0 {
|
||||
modifiedVal := ðpb.Validator{
|
||||
PublicKey: vals[0].PublicKey,
|
||||
WithdrawalCredentials: vals[0].WithdrawalCredentials,
|
||||
EffectiveBalance: vals[0].EffectiveBalance + 1000,
|
||||
Slashed: !vals[0].Slashed,
|
||||
ActivationEligibilityEpoch: vals[0].ActivationEligibilityEpoch,
|
||||
ActivationEpoch: vals[0].ActivationEpoch,
|
||||
ExitEpoch: vals[0].ExitEpoch,
|
||||
WithdrawableEpoch: vals[0].WithdrawableEpoch,
|
||||
}
|
||||
vals[0] = modifiedVal
|
||||
_ = target.SetValidators(vals)
|
||||
|
||||
// Create a simple diff for fuzzing - we'll just use raw bytes
|
||||
_, err := diffToVals(source, target)
|
||||
if err == nil {
|
||||
// Add some realistic validator diff bytes for the corpus
|
||||
f.Add([]byte{1, 0, 0, 0, 0, 0, 0, 0}) // Simple validator diff
|
||||
}
|
||||
}
|
||||
|
||||
// Add edge cases
|
||||
f.Add([]byte{})
|
||||
f.Add([]byte{0x01, 0x02, 0x03, 0x04})
|
||||
|
||||
f.Fuzz(func(t *testing.T, data []byte) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
t.Errorf("newValidatorDiffs panicked: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
_, err := newValidatorDiffs(data)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
|
||||
// FuzzNewBalancesDiff tests balance diff deserialization
|
||||
func FuzzNewBalancesDiff(f *testing.F) {
|
||||
// Add seed corpus
|
||||
source, _ := util.DeterministicGenesisStateElectra(f, 8)
|
||||
target := source.Copy()
|
||||
balances := target.Balances()
|
||||
if len(balances) > 0 {
|
||||
balances[0] += 1000
|
||||
_ = target.SetBalances(balances)
|
||||
|
||||
// Create a simple diff for fuzzing - we'll just use raw bytes
|
||||
_, err := diffToBalances(source, target)
|
||||
if err == nil {
|
||||
// Add some realistic balance diff bytes for the corpus
|
||||
f.Add([]byte{1, 0, 0, 0, 0, 0, 0, 0}) // Simple balance diff
|
||||
}
|
||||
}
|
||||
|
||||
// Add edge cases
|
||||
f.Add([]byte{})
|
||||
f.Add([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})
|
||||
|
||||
f.Fuzz(func(t *testing.T, data []byte) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
t.Errorf("newBalancesDiff panicked: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
_, err := newBalancesDiff(data)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
|
||||
// FuzzApplyDiff tests applying variations of valid diffs
|
||||
func FuzzApplyDiff(f *testing.F) {
|
||||
// Test with realistic state variations, not random data
|
||||
ctx := context.Background()
|
||||
|
||||
// Add seed corpus with various valid scenarios
|
||||
sizes := []uint64{8, 16, 32, 64}
|
||||
for _, size := range sizes {
|
||||
source, _ := util.DeterministicGenesisStateElectra(f, size)
|
||||
		target := source.Copy()

		// Different types of realistic changes
		scenarios := []func(){
			func() { _ = target.SetSlot(source.Slot() + 1) }, // Slot change
			func() { // Balance change
				balances := target.Balances()
				if len(balances) > 0 {
					balances[0] += 1000000000 // 1 ETH
					_ = target.SetBalances(balances)
				}
			},
			func() { // Validator change
				validators := target.Validators()
				if len(validators) > 0 {
					validators[0].EffectiveBalance += 1000000000
					_ = target.SetValidators(validators)
				}
			},
		}

		for _, scenario := range scenarios {
			testTarget := source.Copy()
			scenario()

			validDiff, err := Diff(source, testTarget)
			if err == nil {
				f.Add(validDiff.StateDiff, validDiff.ValidatorDiffs, validDiff.BalancesDiff)
			}
		}
	}

	f.Fuzz(func(t *testing.T, stateDiff, validatorDiffs, balancesDiff []byte) {
		// Only test with reasonable sized inputs
		if len(stateDiff) > 10000 || len(validatorDiffs) > 10000 || len(balancesDiff) > 10000 {
			return
		}

		// Create fresh source state for each test
		source, _ := util.DeterministicGenesisStateElectra(t, 8)

		diff := HdiffBytes{
			StateDiff:      stateDiff,
			ValidatorDiffs: validatorDiffs,
			BalancesDiff:   balancesDiff,
		}

		// Apply diff - errors are expected for fuzzed data
		_, err := ApplyDiff(ctx, source, diff)
		_ = err // Expected to fail with invalid data
	})
}

// FuzzReadPendingAttestation tests the pending attestation deserialization
func FuzzReadPendingAttestation(f *testing.F) {
	// Add edge cases - this function is particularly vulnerable
	f.Add([]byte{})
	f.Add([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}) // 8 bytes
	f.Add(make([]byte, 200))                                      // Larger than expected

	// Add a case with large reported length
	largeLength := make([]byte, 8)
	binary.LittleEndian.PutUint64(largeLength, 0xFFFFFFFF) // Large bits length
	f.Add(largeLength)

	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("readPendingAttestation panicked: %v", r)
			}
		}()

		// Make a copy since the function modifies the slice
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)

		_, err := readPendingAttestation(&dataCopy)
		_ = err
	})
}

// FuzzKmpIndex tests the KMP algorithm implementation
func FuzzKmpIndex(f *testing.F) {
	// Test with integer pointers to match the actual usage
	f.Add(0, "1,2,3", "1,2,3,4,5")
	f.Add(3, "1,2,3", "1,2,3,1,2,3")
	f.Add(0, "", "1,2,3")

	f.Fuzz(func(t *testing.T, lens int, patternStr string, textStr string) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("kmpIndex panicked: %v", r)
			}
		}()

		// Parse comma-separated strings into int slices
		var pattern, text []int
		if patternStr != "" {
			for _, s := range strings.Split(patternStr, ",") {
				if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
					pattern = append(pattern, val)
				}
			}
		}
		if textStr != "" {
			for _, s := range strings.Split(textStr, ",") {
				if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
					text = append(text, val)
				}
			}
		}

		// Convert to pointer slices as used in actual code
		patternPtrs := make([]*int, len(pattern))
		for i := range pattern {
			val := pattern[i]
			patternPtrs[i] = &val
		}

		textPtrs := make([]*int, len(text))
		for i := range text {
			val := text[i]
			textPtrs[i] = &val
		}

		integerEquals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		// Clamp lens to reasonable range to avoid infinite loops
		if lens < 0 {
			lens = 0
		}
		if lens > len(textPtrs) {
			lens = len(textPtrs)
		}

		result := kmpIndex(lens, textPtrs, integerEquals)

		// Basic sanity check
		if result < 0 || result > lens {
			t.Errorf("kmpIndex returned invalid result: %d for lens=%d", result, lens)
		}
	})
}

// FuzzComputeLPS tests the LPS computation for KMP
func FuzzComputeLPS(f *testing.F) {
	// Add seed cases
	f.Add("1,2,1")
	f.Add("1,1,1")
	f.Add("1,2,3,4")
	f.Add("")

	f.Fuzz(func(t *testing.T, patternStr string) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("computeLPS panicked: %v", r)
			}
		}()

		// Parse comma-separated string into int slice
		var pattern []int
		if patternStr != "" {
			for _, s := range strings.Split(patternStr, ",") {
				if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
					pattern = append(pattern, val)
				}
			}
		}

		// Convert to pointer slice
		patternPtrs := make([]*int, len(pattern))
		for i := range pattern {
			val := pattern[i]
			patternPtrs[i] = &val
		}

		integerEquals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		result := computeLPS(patternPtrs, integerEquals)

		// Verify result length matches input
		if len(result) != len(pattern) {
			t.Errorf("computeLPS returned wrong length: got %d, expected %d", len(result), len(pattern))
		}

		// Verify all LPS values are non-negative and within bounds
		for i, lps := range result {
			if lps < 0 || lps > i {
				t.Errorf("Invalid LPS value at index %d: %d", i, lps)
			}
		}
	})
}

// FuzzDiffToBalances tests balance diff computation
func FuzzDiffToBalances(f *testing.F) {
	f.Fuzz(func(t *testing.T, sourceData, targetData []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("diffToBalances panicked: %v", r)
			}
		}()

		// Convert byte data to balance arrays
		var sourceBalances, targetBalances []uint64

		// Parse source balances (8 bytes per uint64)
		for i := 0; i+7 < len(sourceData) && len(sourceBalances) < 100; i += 8 {
			balance := binary.LittleEndian.Uint64(sourceData[i : i+8])
			sourceBalances = append(sourceBalances, balance)
		}

		// Parse target balances
		for i := 0; i+7 < len(targetData) && len(targetBalances) < 100; i += 8 {
			balance := binary.LittleEndian.Uint64(targetData[i : i+8])
			targetBalances = append(targetBalances, balance)
		}

		// Create states with the provided balances
		source, _ := util.DeterministicGenesisStateElectra(t, 1)
		target, _ := util.DeterministicGenesisStateElectra(t, 1)

		if len(sourceBalances) > 0 {
			_ = source.SetBalances(sourceBalances)
		}
		if len(targetBalances) > 0 {
			_ = target.SetBalances(targetBalances)
		}

		result, err := diffToBalances(source, target)

		// If no error, verify result consistency
		if err == nil && len(result) > 0 {
			// Result length should match target length
			if len(result) != len(target.Balances()) {
				t.Errorf("diffToBalances result length mismatch: got %d, expected %d",
					len(result), len(target.Balances()))
			}
		}
	})
}

// FuzzValidatorsEqual tests validator comparison
func FuzzValidatorsEqual(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("validatorsEqual panicked: %v", r)
			}
		}()

		// Create two validators and fuzz their fields
		if len(data) < 16 {
			return
		}

		source, _ := util.DeterministicGenesisStateElectra(t, 2)
		validators := source.Validators()
		if len(validators) < 2 {
			return
		}

		val1 := validators[0]
		val2 := validators[1]

		// Modify validator fields based on fuzz data
		if len(data) > 0 && data[0]%2 == 0 {
			val2.EffectiveBalance = val1.EffectiveBalance + uint64(data[0])
		}
		if len(data) > 1 && data[1]%2 == 0 {
			val2.Slashed = !val1.Slashed
		}

		// Create ReadOnlyValidator wrappers if needed
		// Since validatorsEqual expects ReadOnlyValidator interface,
		// we'll skip this test for now as it requires state wrapper implementation
		_ = val1
		_ = val2
	})
}
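All of the fuzz targets above use the standard Go fuzzing harness (seed inputs via f.Add, engine-generated inputs via f.Fuzz), so each can be exercised on its own with the Go toolchain. The package directory is left as a placeholder here because only the package name hdiff is visible in this diff:

go test -run='^$' -fuzz=FuzzKmpIndex -fuzztime=30s ./path/to/hdiff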
@@ -1,391 +0,0 @@
package hdiff

import (
	"context"
	"encoding/binary"
	"math"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

// FuzzPropertyRoundTrip verifies that diff->apply is idempotent with realistic data
func FuzzPropertyRoundTrip(f *testing.F) {
	f.Fuzz(func(t *testing.T, slotDelta uint64, balanceData []byte, validatorData []byte) {
		// Limit to realistic ranges
		if slotDelta > 32 { // Max one epoch
			slotDelta = slotDelta % 32
		}

		// Convert byte data to realistic deltas and changes
		var balanceDeltas []int64
		var validatorChanges []bool

		// Parse balance deltas - limit to realistic amounts (8 bytes per int64)
		for i := 0; i+7 < len(balanceData) && len(balanceDeltas) < 20; i += 8 {
			delta := int64(binary.LittleEndian.Uint64(balanceData[i : i+8]))
			// Keep deltas realistic (max 10 ETH change)
			if delta > 10000000000 {
				delta = delta % 10000000000
			}
			if delta < -10000000000 {
				delta = -((-delta) % 10000000000)
			}
			balanceDeltas = append(balanceDeltas, delta)
		}

		// Parse validator changes (1 byte per bool) - limit to small number
		for i := 0; i < len(validatorData) && len(validatorChanges) < 10; i++ {
			validatorChanges = append(validatorChanges, validatorData[i]%2 == 0)
		}

		ctx := context.Background()

		// Create source state with reasonable size
		validatorCount := uint64(len(validatorChanges) + 8) // Minimum 8 validators
		if validatorCount > 64 {
			validatorCount = 64 // Cap at 64 for performance
		}
		source, _ := util.DeterministicGenesisStateElectra(t, validatorCount)

		// Create target state with modifications
		target := source.Copy()

		// Apply slot change
		_ = target.SetSlot(source.Slot() + primitives.Slot(slotDelta))

		// Apply realistic balance changes
		if len(balanceDeltas) > 0 {
			balances := target.Balances()
			for i, delta := range balanceDeltas {
				if i >= len(balances) {
					break
				}
				// Apply realistic balance changes with safe bounds
				if delta < 0 {
					if uint64(-delta) > balances[i] {
						balances[i] = 0 // Can't go below 0
					} else {
						balances[i] -= uint64(-delta)
					}
				} else {
					// Cap at reasonable maximum (1000 ETH)
					maxBalance := uint64(1000000000000) // 1000 ETH in Gwei
					if balances[i]+uint64(delta) > maxBalance {
						balances[i] = maxBalance
					} else {
						balances[i] += uint64(delta)
					}
				}
			}
			_ = target.SetBalances(balances)
		}

		// Apply realistic validator changes
		if len(validatorChanges) > 0 {
			validators := target.Validators()
			for i, shouldChange := range validatorChanges {
				if i >= len(validators) {
					break
				}
				if shouldChange {
					// Make realistic changes - small effective balance adjustments
					validators[i].EffectiveBalance += 1000000000 // 1 ETH
				}
			}
			_ = target.SetValidators(validators)
		}

		// Create diff
		diff, err := Diff(source, target)
		if err != nil {
			// If diff creation fails, that's acceptable for malformed inputs
			return
		}

		// Apply diff
		result, err := ApplyDiff(ctx, source, diff)
		if err != nil {
			// If diff application fails, that's acceptable
			return
		}

		// Verify round-trip property: source + diff = target
		require.Equal(t, target.Slot(), result.Slot())

		// Verify balance consistency
		targetBalances := target.Balances()
		resultBalances := result.Balances()
		require.Equal(t, len(targetBalances), len(resultBalances))
		for i := range targetBalances {
			require.Equal(t, targetBalances[i], resultBalances[i], "Balance mismatch at index %d", i)
		}

		// Verify validator consistency
		targetVals := target.Validators()
		resultVals := result.Validators()
		require.Equal(t, len(targetVals), len(resultVals))
		for i := range targetVals {
			require.Equal(t, targetVals[i].Slashed, resultVals[i].Slashed, "Validator slashing mismatch at index %d", i)
			require.Equal(t, targetVals[i].EffectiveBalance, resultVals[i].EffectiveBalance, "Validator balance mismatch at index %d", i)
		}
	})
}

// FuzzPropertyResourceBounds verifies operations complete quickly with realistic data
func FuzzPropertyResourceBounds(f *testing.F) {
	f.Fuzz(func(t *testing.T, validatorCount uint8, slotDelta uint8, changeCount uint8) {
		// Use realistic parameters
		validators := uint64(validatorCount%64 + 8) // 8-71 validators
		slots := uint64(slotDelta % 32)             // 0-31 slots
		changes := int(changeCount % 10)            // 0-9 changes

		// Create realistic states
		source, _ := util.DeterministicGenesisStateElectra(t, validators)
		target := source.Copy()

		// Apply realistic changes
		_ = target.SetSlot(source.Slot() + primitives.Slot(slots))

		if changes > 0 {
			validatorList := target.Validators()
			for i := 0; i < changes && i < len(validatorList); i++ {
				validatorList[i].EffectiveBalance += 1000000000 // 1 ETH
			}
			_ = target.SetValidators(validatorList)
		}

		// Operations should complete quickly
		start := time.Now()
		diff, err := Diff(source, target)
		duration := time.Since(start)

		if err == nil {
			// Should be fast
			require.Equal(t, true, duration < time.Second, "Diff creation too slow: %v", duration)

			// Apply should also be fast
			start = time.Now()
			_, err = ApplyDiff(context.Background(), source, diff)
			duration = time.Since(start)

			if err == nil {
				require.Equal(t, true, duration < time.Second, "Diff application too slow: %v", duration)
			}
		}
	})
}

// FuzzPropertyDiffEfficiency verifies that diffs are smaller than full states for typical cases
func FuzzPropertyDiffEfficiency(f *testing.F) {
	f.Fuzz(func(t *testing.T, slotDelta uint64, numChanges uint8) {
		if slotDelta > 100 {
			slotDelta = slotDelta % 100
		}
		if numChanges > 10 {
			numChanges = numChanges % 10
		}

		// Create states with small differences
		source, _ := util.DeterministicGenesisStateElectra(t, 64)
		target := source.Copy()

		_ = target.SetSlot(source.Slot() + primitives.Slot(slotDelta))

		// Make a few small changes
		if numChanges > 0 {
			validators := target.Validators()
			for i := uint8(0); i < numChanges && int(i) < len(validators); i++ {
				validators[i].EffectiveBalance += 1000
			}
			_ = target.SetValidators(validators)
		}

		// Create diff
		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		// For small changes, diff should be much smaller than full state
		sourceSSZ, err := source.MarshalSSZ()
		if err != nil {
			return
		}

		diffSize := len(diff.StateDiff) + len(diff.ValidatorDiffs) + len(diff.BalancesDiff)

		// Diff should be smaller than full state for small changes
		if numChanges <= 5 && slotDelta <= 10 {
			require.Equal(t, true, diffSize < len(sourceSSZ)/2,
				"Diff size %d should be less than half of state size %d", diffSize, len(sourceSSZ))
		}
	})
}

// FuzzPropertyBalanceConservation verifies that balance operations don't create/destroy value unexpectedly
func FuzzPropertyBalanceConservation(f *testing.F) {
	f.Fuzz(func(t *testing.T, balanceData []byte) {
		// Convert byte data to balance changes
		var balanceChanges []int64
		for i := 0; i+7 < len(balanceData) && len(balanceChanges) < 50; i += 8 {
			change := int64(binary.LittleEndian.Uint64(balanceData[i : i+8]))
			balanceChanges = append(balanceChanges, change)
		}

		source, _ := util.DeterministicGenesisStateElectra(t, uint64(len(balanceChanges)+10))
		originalBalances := source.Balances()

		// Calculate total before
		var totalBefore uint64
		for _, balance := range originalBalances {
			totalBefore += balance
		}

		// Apply balance changes via diff system
		target := source.Copy()
		targetBalances := target.Balances()

		var totalDelta int64
		for i, delta := range balanceChanges {
			if i >= len(targetBalances) {
				break
			}

			// Prevent underflow
			if delta < 0 && uint64(-delta) > targetBalances[i] {
				totalDelta += int64(targetBalances[i]) // Lost amount
				targetBalances[i] = 0
			} else if delta < 0 {
				targetBalances[i] -= uint64(-delta)
				totalDelta += delta
			} else {
				// Prevent overflow
				if uint64(delta) > math.MaxUint64-targetBalances[i] {
					gained := math.MaxUint64 - targetBalances[i]
					totalDelta += int64(gained)
					targetBalances[i] = math.MaxUint64
				} else {
					targetBalances[i] += uint64(delta)
					totalDelta += delta
				}
			}
		}
		_ = target.SetBalances(targetBalances)

		// Apply through diff system
		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		result, err := ApplyDiff(context.Background(), source, diff)
		if err != nil {
			return
		}

		// Calculate total after
		resultBalances := result.Balances()
		var totalAfter uint64
		for _, balance := range resultBalances {
			totalAfter += balance
		}

		// Verify conservation (accounting for intended changes)
		expectedTotal := totalBefore
		if totalDelta >= 0 {
			expectedTotal += uint64(totalDelta)
		} else {
			if uint64(-totalDelta) <= expectedTotal {
				expectedTotal -= uint64(-totalDelta)
			} else {
				expectedTotal = 0
			}
		}

		require.Equal(t, expectedTotal, totalAfter,
			"Balance conservation violated: before=%d, delta=%d, expected=%d, actual=%d",
			totalBefore, totalDelta, expectedTotal, totalAfter)
	})
}

// FuzzPropertyMonotonicSlot verifies slot only increases
func FuzzPropertyMonotonicSlot(f *testing.F) {
	f.Fuzz(func(t *testing.T, slotDelta uint64) {
		source, _ := util.DeterministicGenesisStateElectra(t, 16)
		target := source.Copy()

		targetSlot := source.Slot() + primitives.Slot(slotDelta)
		_ = target.SetSlot(targetSlot)

		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		result, err := ApplyDiff(context.Background(), source, diff)
		if err != nil {
			return
		}

		// Slot should never decrease
		require.Equal(t, true, result.Slot() >= source.Slot(),
			"Slot decreased from %d to %d", source.Slot(), result.Slot())

		// Slot should match target
		require.Equal(t, targetSlot, result.Slot())
	})
}

// FuzzPropertyValidatorIndices verifies validator indices remain consistent
func FuzzPropertyValidatorIndices(f *testing.F) {
	f.Fuzz(func(t *testing.T, changeData []byte) {
		// Convert byte data to boolean changes
		var changes []bool
		for i := 0; i < len(changeData) && len(changes) < 20; i++ {
			changes = append(changes, changeData[i]%2 == 0)
		}

		source, _ := util.DeterministicGenesisStateElectra(t, uint64(len(changes)+5))
		target := source.Copy()

		// Apply changes
		validators := target.Validators()
		for i, shouldChange := range changes {
			if i >= len(validators) {
				break
			}
			if shouldChange {
				validators[i].EffectiveBalance += 1000
			}
		}
		_ = target.SetValidators(validators)

		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		result, err := ApplyDiff(context.Background(), source, diff)
		if err != nil {
			return
		}

		// Validator count should not decrease
		require.Equal(t, true, len(result.Validators()) >= len(source.Validators()),
			"Validator count decreased from %d to %d", len(source.Validators()), len(result.Validators()))

		// Public keys should be preserved for existing validators
		sourceVals := source.Validators()
		resultVals := result.Validators()
		for i := range sourceVals {
			if i < len(resultVals) {
				require.Equal(t, sourceVals[i].PublicKey, resultVals[i].PublicKey,
					"Public key changed at validator index %d", i)
			}
		}
	})
}
@@ -1,393 +0,0 @@
package hdiff

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

// TestIntegerOverflowProtection tests protection against balance overflow attacks
func TestIntegerOverflowProtection(t *testing.T) {
	source, _ := util.DeterministicGenesisStateElectra(t, 8)

	// Test balance overflow in diffToBalances - use realistic values
	t.Run("balance_diff_overflow", func(t *testing.T) {
		target := source.Copy()
		balances := target.Balances()

		// Set high but realistic balance values (32 ETH in Gwei = 32e9)
		balances[0] = 32000000000 // 32 ETH
		balances[1] = 64000000000 // 64 ETH
		_ = target.SetBalances(balances)

		// This should work fine with realistic values
		diffs, err := diffToBalances(source, target)
		require.NoError(t, err)

		// Verify the diffs are reasonable
		require.Equal(t, true, len(diffs) > 0, "Should have balance diffs")
	})

	// Test reasonable balance changes
	t.Run("realistic_balance_changes", func(t *testing.T) {
		// Create realistic balance changes (slashing, rewards)
		balancesDiff := []int64{1000000000, -500000000, 2000000000} // 1 ETH gain, 0.5 ETH loss, 2 ETH gain

		// Apply to state with normal balances
		testSource := source.Copy()
		normalBalances := []uint64{32000000000, 32000000000, 32000000000} // 32 ETH each
		_ = testSource.SetBalances(normalBalances)

		// This should work fine
		result, err := applyBalancesDiff(testSource, balancesDiff)
		require.NoError(t, err)

		resultBalances := result.Balances()
		require.Equal(t, uint64(33000000000), resultBalances[0]) // 33 ETH
		require.Equal(t, uint64(31500000000), resultBalances[1]) // 31.5 ETH
		require.Equal(t, uint64(34000000000), resultBalances[2]) // 34 ETH
	})
}

// TestReasonablePerformance tests that operations complete in reasonable time
func TestReasonablePerformance(t *testing.T) {
	t.Run("large_state_performance", func(t *testing.T) {
		// Test with a large but realistic validator set
		source, _ := util.DeterministicGenesisStateElectra(t, 1000) // 1000 validators
		target := source.Copy()

		// Make realistic changes
		_ = target.SetSlot(source.Slot() + 32) // One epoch
		validators := target.Validators()
		for i := 0; i < 100; i++ { // 10% of validators changed
			validators[i].EffectiveBalance += 1000000000 // 1 ETH change
		}
		_ = target.SetValidators(validators)

		// Should complete quickly
		start := time.Now()
		diff, err := Diff(source, target)
		duration := time.Since(start)

		require.NoError(t, err)
		require.Equal(t, true, duration < time.Second, "Diff creation took too long: %v", duration)
		require.Equal(t, true, len(diff.StateDiff) > 0, "Should have state diff")
	})

	t.Run("realistic_diff_application", func(t *testing.T) {
		// Test applying diffs to large states
		source, _ := util.DeterministicGenesisStateElectra(t, 500)
		target := source.Copy()
		_ = target.SetSlot(source.Slot() + 1)

		// Create and apply diff
		diff, err := Diff(source, target)
		require.NoError(t, err)

		start := time.Now()
		result, err := ApplyDiff(context.Background(), source, diff)
		duration := time.Since(start)

		require.NoError(t, err)
		require.Equal(t, target.Slot(), result.Slot())
		require.Equal(t, true, duration < time.Second, "Diff application took too long: %v", duration)
	})
}

// TestStateTransitionValidation tests realistic state transition scenarios
func TestStateTransitionValidation(t *testing.T) {
	t.Run("validator_slashing_scenario", func(t *testing.T) {
		source, _ := util.DeterministicGenesisStateElectra(t, 10)
		target := source.Copy()

		// Simulate validator slashing (realistic scenario)
		validators := target.Validators()
		validators[0].Slashed = true
		validators[0].EffectiveBalance = 0 // Slashed validator loses balance
		_ = target.SetValidators(validators)

		// This should work fine
		diff, err := Diff(source, target)
		require.NoError(t, err)

		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)
		require.Equal(t, true, result.Validators()[0].Slashed)
		require.Equal(t, uint64(0), result.Validators()[0].EffectiveBalance)
	})

	t.Run("epoch_transition_scenario", func(t *testing.T) {
		source, _ := util.DeterministicGenesisStateElectra(t, 64)
		target := source.Copy()

		// Simulate epoch transition with multiple changes
		_ = target.SetSlot(source.Slot() + 32) // One epoch

		// Some validators get rewards, others get penalties
		balances := target.Balances()
		for i := 0; i < len(balances); i++ {
			if i%2 == 0 {
				balances[i] += 100000000 // 0.1 ETH reward
			} else {
				if balances[i] > 50000000 {
					balances[i] -= 50000000 // 0.05 ETH penalty
				}
			}
		}
		_ = target.SetBalances(balances)

		// This should work smoothly
		diff, err := Diff(source, target)
		require.NoError(t, err)

		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)
		require.Equal(t, target.Slot(), result.Slot())
	})

	t.Run("consistent_state_root", func(t *testing.T) {
		// Test that diffs preserve state consistency
		source, _ := util.DeterministicGenesisStateElectra(t, 32)
		target := source.Copy()

		// Make minimal changes
		_ = target.SetSlot(source.Slot() + 1)

		// Diff and apply should be consistent
		diff, err := Diff(source, target)
		require.NoError(t, err)

		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)

		// Result should match target
		require.Equal(t, target.Slot(), result.Slot())
		require.Equal(t, len(target.Validators()), len(result.Validators()))
		require.Equal(t, len(target.Balances()), len(result.Balances()))
	})
}

// TestSerializationRoundTrip tests serialization consistency
func TestSerializationRoundTrip(t *testing.T) {
	t.Run("diff_serialization_consistency", func(t *testing.T) {
		// Test that serialization and deserialization are consistent
		source, _ := util.DeterministicGenesisStateElectra(t, 16)
		target := source.Copy()

		// Make changes
		_ = target.SetSlot(source.Slot() + 5)
		validators := target.Validators()
		validators[0].EffectiveBalance += 1000000000
		_ = target.SetValidators(validators)

		// Create diff
		diff1, err := Diff(source, target)
		require.NoError(t, err)

		// Deserialize and re-serialize
		hdiff, err := newHdiff(diff1)
		require.NoError(t, err)

		diff2 := hdiff.serialize()

		// Apply both diffs - should get same result
		result1, err := ApplyDiff(context.Background(), source, diff1)
		require.NoError(t, err)

		result2, err := ApplyDiff(context.Background(), source, diff2)
		require.NoError(t, err)

		require.Equal(t, result1.Slot(), result2.Slot())
		require.Equal(t, result1.Validators()[0].EffectiveBalance, result2.Validators()[0].EffectiveBalance)
	})

	t.Run("empty_diff_handling", func(t *testing.T) {
		// Test that empty diffs are handled correctly
		source, _ := util.DeterministicGenesisStateElectra(t, 8)
		target := source.Copy() // No changes

		// Should create minimal diff
		diff, err := Diff(source, target)
		require.NoError(t, err)

		// Apply should work and return equivalent state
		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)

		require.Equal(t, source.Slot(), result.Slot())
		require.Equal(t, len(source.Validators()), len(result.Validators()))
	})

	t.Run("compression_efficiency", func(t *testing.T) {
		// Test that compression is working effectively
		source, _ := util.DeterministicGenesisStateElectra(t, 100)
		target := source.Copy()

		// Make small changes
		_ = target.SetSlot(source.Slot() + 1)
		validators := target.Validators()
		validators[0].EffectiveBalance += 1000000000
		_ = target.SetValidators(validators)

		// Create diff
		diff, err := Diff(source, target)
		require.NoError(t, err)

		// Get full state size
		fullStateSSZ, err := target.MarshalSSZ()
		require.NoError(t, err)

		// Diff should be much smaller than full state
		diffSize := len(diff.StateDiff) + len(diff.ValidatorDiffs) + len(diff.BalancesDiff)
		require.Equal(t, true, diffSize < len(fullStateSSZ)/2,
			"Diff should be smaller than full state: diff=%d, full=%d", diffSize, len(fullStateSSZ))
	})
}

// TestKMPSecurity tests the KMP algorithm for security issues
func TestKMPSecurity(t *testing.T) {
	t.Run("nil_pointer_handling", func(t *testing.T) {
		// Test with nil pointers in the pattern/text
		pattern := []*int{nil, nil, nil}
		text := []*int{nil, nil, nil, nil, nil}

		equals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		// Should not panic - result can be any integer
		result := kmpIndex(len(pattern), text, equals)
		_ = result // Any result is valid, just ensure no panic
	})

	t.Run("empty_pattern_edge_case", func(t *testing.T) {
		var pattern []*int
		text := []*int{new(int), new(int)}

		equals := func(a, b *int) bool { return a == b }

		result := kmpIndex(0, text, equals)
		require.Equal(t, 0, result, "Empty pattern should return 0")
		_ = pattern // Silence unused variable warning
	})

	t.Run("realistic_pattern_performance", func(t *testing.T) {
		// Test with realistic sizes to ensure good performance
		realisticSize := 100 // More realistic for validator arrays
		pattern := make([]*int, realisticSize)
		text := make([]*int, realisticSize*2)

		// Create realistic pattern
		for i := range pattern {
			val := i % 10 // More variation
			pattern[i] = &val
		}
		for i := range text {
			val := i % 10
			text[i] = &val
		}

		equals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		start := time.Now()
		result := kmpIndex(len(pattern), text, equals)
		duration := time.Since(start)

		// Should complete quickly with realistic inputs
		require.Equal(t, true, duration < time.Second,
			"KMP took too long: %v", duration)
		_ = result // Any result is valid, just ensure performance is good
	})
}

// TestConcurrencySafety tests thread safety of the hdiff operations
func TestConcurrencySafety(t *testing.T) {
	t.Run("concurrent_diff_creation", func(t *testing.T) {
		source, _ := util.DeterministicGenesisStateElectra(t, 32)
		target := source.Copy()
		_ = target.SetSlot(source.Slot() + 1)

		const numGoroutines = 10
		const iterations = 100

		var wg sync.WaitGroup
		errors := make(chan error, numGoroutines*iterations)

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(workerID int) {
				defer wg.Done()

				for j := 0; j < iterations; j++ {
					_, err := Diff(source, target)
					if err != nil {
						errors <- fmt.Errorf("worker %d iteration %d: %v", workerID, j, err)
					}
				}
			}(i)
		}

		wg.Wait()
		close(errors)

		// Check for any errors
		for err := range errors {
			t.Error(err)
		}
	})

	t.Run("concurrent_diff_application", func(t *testing.T) {
		ctx := context.Background()
		source, _ := util.DeterministicGenesisStateElectra(t, 16)
		target := source.Copy()
		_ = target.SetSlot(source.Slot() + 5)

		diff, err := Diff(source, target)
		require.NoError(t, err)

		const numGoroutines = 10
		var wg sync.WaitGroup
		errors := make(chan error, numGoroutines)

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(workerID int) {
				defer wg.Done()

				// Each goroutine needs its own copy of the source state
				localSource := source.Copy()
				_, err := ApplyDiff(ctx, localSource, diff)
				if err != nil {
					errors <- fmt.Errorf("worker %d: %v", workerID, err)
				}
			}(i)
		}

		wg.Wait()
		close(errors)

		// Check for any errors
		for err := range errors {
			t.Error(err)
		}
	})
}
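All three files removed above exercise the same core API: Diff builds a delta between two beacon states and ApplyDiff replays it onto the source. A minimal sketch of that round trip, written as an in-package test like the deleted files and using only the calls they already rely on (nothing outside this diff is assumed):

package hdiff

import (
	"context"
	"testing"

	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

// Sketch: applying Diff(source, target) to source should reproduce target.
func TestDiffApplyRoundTripSketch(t *testing.T) {
	source, _ := util.DeterministicGenesisStateElectra(t, 16)
	target := source.Copy()
	_ = target.SetSlot(source.Slot() + 1)

	d, err := Diff(source, target) // d carries StateDiff, ValidatorDiffs, BalancesDiff
	require.NoError(t, err)

	result, err := ApplyDiff(context.Background(), source, d)
	require.NoError(t, err)
	require.Equal(t, target.Slot(), result.Slot())
}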
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,9 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["comparisons.go"],
    importpath = "github.com/OffchainLabs/prysm/v6/consensus-types/helpers",
    visibility = ["//visibility:public"],
    deps = ["//proto/prysm/v1alpha1:go_default_library"],
)
@@ -1,109 +0,0 @@
package helpers

import (
	"bytes"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func ForksEqual(s, t *ethpb.Fork) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if s.Epoch != t.Epoch {
		return false
	}
	if !bytes.Equal(s.PreviousVersion, t.PreviousVersion) {
		return false
	}
	return bytes.Equal(s.CurrentVersion, t.CurrentVersion)
}

func BlockHeadersEqual(s, t *ethpb.BeaconBlockHeader) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if s.Slot != t.Slot {
		return false
	}
	if s.ProposerIndex != t.ProposerIndex {
		return false
	}
	if !bytes.Equal(s.ParentRoot, t.ParentRoot) {
		return false
	}
	if !bytes.Equal(s.StateRoot, t.StateRoot) {
		return false
	}
	return bytes.Equal(s.BodyRoot, t.BodyRoot)
}

func Eth1DataEqual(s, t *ethpb.Eth1Data) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if !bytes.Equal(s.DepositRoot, t.DepositRoot) {
		return false
	}
	if s.DepositCount != t.DepositCount {
		return false
	}
	return bytes.Equal(s.BlockHash, t.BlockHash)
}

func PendingDepositsEqual(s, t *ethpb.PendingDeposit) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if !bytes.Equal(s.PublicKey, t.PublicKey) {
		return false
	}
	if !bytes.Equal(s.WithdrawalCredentials, t.WithdrawalCredentials) {
		return false
	}
	if s.Amount != t.Amount {
		return false
	}
	if !bytes.Equal(s.Signature, t.Signature) {
		return false
	}
	return s.Slot == t.Slot
}

func PendingPartialWithdrawalsEqual(s, t *ethpb.PendingPartialWithdrawal) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if s.Index != t.Index {
		return false
	}
	if s.Amount != t.Amount {
		return false
	}
	return s.WithdrawableEpoch == t.WithdrawableEpoch
}

func PendingConsolidationsEqual(s, t *ethpb.PendingConsolidation) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	return s.SourceIndex == t.SourceIndex && s.TargetIndex == t.TargetIndex
}
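The removed helpers are all nil-safe, field-by-field comparisons. A short sketch of how the removed Eth1DataEqual behaved; the field names come from the function body above, while the test scaffolding itself is only illustrative:

package helpers_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/consensus-types/helpers"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestEth1DataEqualSketch(t *testing.T) {
	a := &ethpb.Eth1Data{DepositRoot: make([]byte, 32), DepositCount: 1, BlockHash: make([]byte, 32)}
	b := &ethpb.Eth1Data{DepositRoot: make([]byte, 32), DepositCount: 1, BlockHash: make([]byte, 32)}

	require.Equal(t, true, helpers.Eth1DataEqual(a, b))     // identical fields compare equal
	require.Equal(t, true, helpers.Eth1DataEqual(nil, nil)) // two nils are treated as equal
	require.Equal(t, false, helpers.Eth1DataEqual(a, nil))  // a single nil is not
}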
@@ -2,13 +2,14 @@
# Common
##############################################################################

load("@rules_proto//proto:defs.bzl", "proto_library")

##############################################################################
# Go
##############################################################################
# gazelle:ignore
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("@rules_proto//proto:defs.bzl", "proto_library")
load("//proto:ssz_proto_library.bzl", "ssz_proto_files")
load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")

@@ -188,7 +189,6 @@ ssz_fulu_objs = [
    "DataColumnIdentifier",
    "DataColumnsByRootIdentifier",
    "DataColumnSidecar",
    "StatusV2",
    "SignedBeaconBlockContentsFulu",
    "SignedBeaconBlockFulu",
    "SignedBlindedBeaconBlockFulu",
@@ -359,17 +359,15 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1",
    visibility = ["//visibility:public"],
    deps = SSZ_DEPS + [
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/ext:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_golang_protobuf//proto:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_golang_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",  # keep
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@googleapis//google/api:annotations_go_proto",
        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
        "@io_bazel_rules_go//proto/wkt:empty_go_proto",
@@ -384,6 +382,8 @@ go_library(
        "@org_golang_google_protobuf//runtime/protoimpl:go_default_library",
        "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)


@@ -5,12 +6,6 @@ import (
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
)

// GenericConverter defines any struct that can be converted to a generic beacon block.
// All versioned block structs are assumed to implement this method.
type GenericConverter interface {
	ToGeneric() (*GenericBeaconBlock, error)
}

// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------

proto/prysm/v1alpha1/data_columns.pb.go (generated, 125 changed lines)
@@ -109,6 +109,61 @@ func (x *DataColumnSidecar) GetKzgCommitmentsInclusionProof() [][]byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
type DataColumnIdentifier struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
|
||||
Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DataColumnIdentifier) Reset() {
|
||||
*x = DataColumnIdentifier{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DataColumnIdentifier) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DataColumnIdentifier) ProtoMessage() {}
|
||||
|
||||
func (x *DataColumnIdentifier) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DataColumnIdentifier.ProtoReflect.Descriptor instead.
|
||||
func (*DataColumnIdentifier) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *DataColumnIdentifier) GetBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *DataColumnIdentifier) GetIndex() uint64 {
|
||||
if x != nil {
|
||||
return x.Index
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DataColumnsByRootIdentifier struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -121,7 +176,7 @@ type DataColumnsByRootIdentifier struct {
|
||||
func (x *DataColumnsByRootIdentifier) Reset() {
|
||||
*x = DataColumnsByRootIdentifier{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
|
||||
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -134,7 +189,7 @@ func (x *DataColumnsByRootIdentifier) String() string {
|
||||
func (*DataColumnsByRootIdentifier) ProtoMessage() {}
|
||||
|
||||
func (x *DataColumnsByRootIdentifier) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
|
||||
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -147,7 +202,7 @@ func (x *DataColumnsByRootIdentifier) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DataColumnsByRootIdentifier.ProtoReflect.Descriptor instead.
|
||||
func (*DataColumnsByRootIdentifier) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{1}
|
||||
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *DataColumnsByRootIdentifier) GetBlockRoot() []byte {
|
||||
@@ -198,24 +253,29 @@ var file_proto_prysm_v1alpha1_data_columns_proto_rawDesc = []byte{
|
||||
0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x06, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x34, 0x2c, 0x33, 0x32, 0x52, 0x1c, 0x6b,
|
||||
0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x49, 0x6e, 0x63,
|
||||
0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x1b, 0x44,
|
||||
0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74,
|
||||
0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f,
|
||||
0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03,
|
||||
0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x07, 0x63, 0x6f, 0x6c,
|
||||
0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x42, 0x10, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70,
|
||||
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74,
|
||||
0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68,
|
||||
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
|
||||
0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
|
||||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x53, 0x0a, 0x14, 0x44,
|
||||
0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
|
||||
0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f,
|
||||
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52,
|
||||
0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78,
|
||||
0x22, 0x67, 0x0a, 0x1b, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42,
|
||||
0x79, 0x52, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12,
|
||||
0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
|
||||
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38,
|
||||
0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72,
|
||||
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
|
||||
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c,
|
||||
0x75, 0x6d, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74,
|
||||
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e,
|
||||
0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
|
||||
0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02,
|
||||
0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -230,14 +290,15 @@ func file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP() []byte {
|
||||
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_proto_prysm_v1alpha1_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_proto_prysm_v1alpha1_data_columns_proto_goTypes = []interface{}{
|
||||
(*DataColumnSidecar)(nil), // 0: ethereum.eth.v1alpha1.DataColumnSidecar
|
||||
(*DataColumnsByRootIdentifier)(nil), // 1: ethereum.eth.v1alpha1.DataColumnsByRootIdentifier
|
||||
(*SignedBeaconBlockHeader)(nil), // 2: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
|
||||
(*DataColumnIdentifier)(nil), // 1: ethereum.eth.v1alpha1.DataColumnIdentifier
|
||||
(*DataColumnsByRootIdentifier)(nil), // 2: ethereum.eth.v1alpha1.DataColumnsByRootIdentifier
|
||||
(*SignedBeaconBlockHeader)(nil), // 3: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_data_columns_proto_depIdxs = []int32{
|
||||
2, // 0: ethereum.eth.v1alpha1.DataColumnSidecar.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
|
||||
3, // 0: ethereum.eth.v1alpha1.DataColumnSidecar.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
@@ -265,6 +326,18 @@ func file_proto_prysm_v1alpha1_data_columns_proto_init() {
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DataColumnIdentifier); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DataColumnsByRootIdentifier); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -283,7 +356,7 @@ func file_proto_prysm_v1alpha1_data_columns_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_data_columns_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -45,6 +45,11 @@ message DataColumnSidecar {
      "kzg_commitments_inclusion_proof_depth.size,32" ];
}

message DataColumnIdentifier {
  bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
  uint64 index = 2;
}

message DataColumnsByRootIdentifier {
  bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
  repeated uint64 columns = 2 [ (ethereum.eth.ext.ssz_max) = "128" ];

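DataColumnIdentifier is a fixed-size SSZ container: a 32-byte block root followed by a little-endian uint64 index, 40 bytes in total, per the generated marshalling code below. A minimal round-trip sketch using only the generated methods shown in this diff (the surrounding main function is illustrative):

package main

import (
	"fmt"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func main() {
	id := &ethpb.DataColumnIdentifier{BlockRoot: make([]byte, 32), Index: 7}

	enc, err := id.MarshalSSZ()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(enc)) // 40: 32-byte root + 8-byte index

	decoded := &ethpb.DataColumnIdentifier{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Index) // 7
}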
@@ -2246,6 +2246,77 @@ func (d *DataColumnSidecar) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalSSZ ssz marshals the DataColumnIdentifier object
|
||||
func (d *DataColumnIdentifier) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(d)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the DataColumnIdentifier object to a target array
|
||||
func (d *DataColumnIdentifier) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
|
||||
|
||||
// Field (0) 'BlockRoot'
|
||||
if size := len(d.BlockRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
|
||||
return
|
||||
}
|
||||
dst = append(dst, d.BlockRoot...)
|
||||
|
||||
// Field (1) 'Index'
|
||||
dst = ssz.MarshalUint64(dst, d.Index)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the DataColumnIdentifier object
|
||||
func (d *DataColumnIdentifier) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size != 40 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
// Field (0) 'BlockRoot'
|
||||
if cap(d.BlockRoot) == 0 {
|
||||
d.BlockRoot = make([]byte, 0, len(buf[0:32]))
|
||||
}
|
||||
d.BlockRoot = append(d.BlockRoot, buf[0:32]...)
|
||||
|
||||
// Field (1) 'Index'
|
||||
d.Index = ssz.UnmarshallUint64(buf[32:40])
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the DataColumnIdentifier object
|
||||
func (d *DataColumnIdentifier) SizeSSZ() (size int) {
|
||||
size = 40
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the DataColumnIdentifier object
|
||||
func (d *DataColumnIdentifier) HashTreeRoot() ([32]byte, error) {
|
||||
return ssz.HashWithDefaultHasher(d)
|
||||
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the DataColumnIdentifier object with a hasher
|
||||
func (d *DataColumnIdentifier) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
indx := hh.Index()
|
||||
|
||||
// Field (0) 'BlockRoot'
|
||||
if size := len(d.BlockRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(d.BlockRoot)
|
||||
|
||||
// Field (1) 'Index'
|
||||
hh.PutUint64(d.Index)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalSSZ ssz marshals the DataColumnsByRootIdentifier object
|
||||
func (d *DataColumnsByRootIdentifier) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(d)
|
||||
@@ -2365,132 +2436,3 @@ func (d *DataColumnsByRootIdentifier) HashTreeRootWith(hh *ssz.Hasher) (err erro
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalSSZ ssz marshals the StatusV2 object
|
||||
func (s *StatusV2) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(s)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the StatusV2 object to a target array
|
||||
func (s *StatusV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
	// Field (0) 'ForkDigest'
	if size := len(s.ForkDigest); size != 4 {
		err = ssz.ErrBytesLengthFn("--.ForkDigest", size, 4)
		return
	}
	dst = append(dst, s.ForkDigest...)

	// Field (1) 'FinalizedRoot'
	if size := len(s.FinalizedRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.FinalizedRoot", size, 32)
		return
	}
	dst = append(dst, s.FinalizedRoot...)

	// Field (2) 'FinalizedEpoch'
	dst = ssz.MarshalUint64(dst, uint64(s.FinalizedEpoch))

	// Field (3) 'HeadRoot'
	if size := len(s.HeadRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.HeadRoot", size, 32)
		return
	}
	dst = append(dst, s.HeadRoot...)

	// Field (4) 'HeadSlot'
	dst = ssz.MarshalUint64(dst, uint64(s.HeadSlot))

	// Field (5) 'EarliestAvailableSlot'
	dst = ssz.MarshalUint64(dst, uint64(s.EarliestAvailableSlot))

	return
}

// UnmarshalSSZ ssz unmarshals the StatusV2 object
func (s *StatusV2) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 92 {
		return ssz.ErrSize
	}

	// Field (0) 'ForkDigest'
	if cap(s.ForkDigest) == 0 {
		s.ForkDigest = make([]byte, 0, len(buf[0:4]))
	}
	s.ForkDigest = append(s.ForkDigest, buf[0:4]...)

	// Field (1) 'FinalizedRoot'
	if cap(s.FinalizedRoot) == 0 {
		s.FinalizedRoot = make([]byte, 0, len(buf[4:36]))
	}
	s.FinalizedRoot = append(s.FinalizedRoot, buf[4:36]...)

	// Field (2) 'FinalizedEpoch'
	s.FinalizedEpoch = github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[36:44]))

	// Field (3) 'HeadRoot'
	if cap(s.HeadRoot) == 0 {
		s.HeadRoot = make([]byte, 0, len(buf[44:76]))
	}
	s.HeadRoot = append(s.HeadRoot, buf[44:76]...)

	// Field (4) 'HeadSlot'
	s.HeadSlot = github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[76:84]))

	// Field (5) 'EarliestAvailableSlot'
	s.EarliestAvailableSlot = github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[84:92]))

	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the StatusV2 object
func (s *StatusV2) SizeSSZ() (size int) {
	size = 92
	return
}

// HashTreeRoot ssz hashes the StatusV2 object
func (s *StatusV2) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(s)
}

// HashTreeRootWith ssz hashes the StatusV2 object with a hasher
func (s *StatusV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'ForkDigest'
	if size := len(s.ForkDigest); size != 4 {
		err = ssz.ErrBytesLengthFn("--.ForkDigest", size, 4)
		return
	}
	hh.PutBytes(s.ForkDigest)

	// Field (1) 'FinalizedRoot'
	if size := len(s.FinalizedRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.FinalizedRoot", size, 32)
		return
	}
	hh.PutBytes(s.FinalizedRoot)

	// Field (2) 'FinalizedEpoch'
	hh.PutUint64(uint64(s.FinalizedEpoch))

	// Field (3) 'HeadRoot'
	if size := len(s.HeadRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.HeadRoot", size, 32)
		return
	}
	hh.PutBytes(s.HeadRoot)

	// Field (4) 'HeadSlot'
	hh.PutUint64(uint64(s.HeadSlot))

	// Field (5) 'EarliestAvailableSlot'
	hh.PutUint64(uint64(s.EarliestAvailableSlot))

	hh.Merkleize(indx)
	return
}
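The methods above are standard fastssz output for a fixed-size container (4 + 32 + 8 + 32 + 8 + 8 = 92 bytes). As a reading aid only, the sketch below shows how such generated methods are typically exercised; it assumes the generated ethpb package still exposes StatusV2 together with the usual MarshalSSZ wrapper around MarshalSSZTo, which is not shown in this hunk.

package main

import (
	"fmt"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func main() {
	// Fixed-size container: every field offset is known at compile time.
	msg := &ethpb.StatusV2{
		ForkDigest:            make([]byte, 4),
		FinalizedRoot:         make([]byte, 32),
		FinalizedEpoch:        10,
		HeadRoot:              make([]byte, 32),
		HeadSlot:              321,
		EarliestAvailableSlot: 300,
	}

	enc, err := msg.MarshalSSZ() // assumed wrapper around MarshalSSZTo
	if err != nil {
		panic(err)
	}
	fmt.Println(len(enc)) // 92, matching SizeSSZ

	decoded := &ethpb.StatusV2{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		panic(err)
	}

	root, err := msg.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	fmt.Printf("hash tree root: %x\n", root)
}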
403 proto/prysm/v1alpha1/p2p_messages.pb.go generated
@@ -104,93 +104,6 @@ func (x *Status) GetHeadSlot() github_com_OffchainLabs_prysm_v6_consensus_types_
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
type StatusV2 struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ForkDigest []byte `protobuf:"bytes,1,opt,name=fork_digest,json=forkDigest,proto3" json:"fork_digest,omitempty" ssz-size:"4"`
|
||||
FinalizedRoot []byte `protobuf:"bytes,2,opt,name=finalized_root,json=finalizedRoot,proto3" json:"finalized_root,omitempty" ssz-size:"32"`
|
||||
FinalizedEpoch github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Epoch `protobuf:"varint,3,opt,name=finalized_epoch,json=finalizedEpoch,proto3" json:"finalized_epoch,omitempty" cast-type:"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Epoch"`
|
||||
HeadRoot []byte `protobuf:"bytes,4,opt,name=head_root,json=headRoot,proto3" json:"head_root,omitempty" ssz-size:"32"`
|
||||
HeadSlot github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot `protobuf:"varint,5,opt,name=head_slot,json=headSlot,proto3" json:"head_slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"`
|
||||
EarliestAvailableSlot github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot `protobuf:"varint,6,opt,name=earliest_available_slot,json=earliestAvailableSlot,proto3" json:"earliest_available_slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"`
|
||||
}
|
||||
|
||||
func (x *StatusV2) Reset() {
|
||||
*x = StatusV2{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *StatusV2) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*StatusV2) ProtoMessage() {}
|
||||
|
||||
func (x *StatusV2) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use StatusV2.ProtoReflect.Descriptor instead.
|
||||
func (*StatusV2) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *StatusV2) GetForkDigest() []byte {
|
||||
if x != nil {
|
||||
return x.ForkDigest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *StatusV2) GetFinalizedRoot() []byte {
|
||||
if x != nil {
|
||||
return x.FinalizedRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *StatusV2) GetFinalizedEpoch() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Epoch {
|
||||
if x != nil {
|
||||
return x.FinalizedEpoch
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Epoch(0)
|
||||
}
|
||||
|
||||
func (x *StatusV2) GetHeadRoot() []byte {
|
||||
if x != nil {
|
||||
return x.HeadRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *StatusV2) GetHeadSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
|
||||
if x != nil {
|
||||
return x.HeadSlot
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
func (x *StatusV2) GetEarliestAvailableSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
|
||||
if x != nil {
|
||||
return x.EarliestAvailableSlot
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
type BeaconBlocksByRangeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -204,7 +117,7 @@ type BeaconBlocksByRangeRequest struct {
|
||||
func (x *BeaconBlocksByRangeRequest) Reset() {
|
||||
*x = BeaconBlocksByRangeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[2]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -217,7 +130,7 @@ func (x *BeaconBlocksByRangeRequest) String() string {
|
||||
func (*BeaconBlocksByRangeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *BeaconBlocksByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[2]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -230,7 +143,7 @@ func (x *BeaconBlocksByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use BeaconBlocksByRangeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*BeaconBlocksByRangeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{2}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *BeaconBlocksByRangeRequest) GetStartSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
|
||||
@@ -267,7 +180,7 @@ type ENRForkID struct {
|
||||
func (x *ENRForkID) Reset() {
|
||||
*x = ENRForkID{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[3]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -280,7 +193,7 @@ func (x *ENRForkID) String() string {
|
||||
func (*ENRForkID) ProtoMessage() {}
|
||||
|
||||
func (x *ENRForkID) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[3]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -293,7 +206,7 @@ func (x *ENRForkID) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ENRForkID.ProtoReflect.Descriptor instead.
|
||||
func (*ENRForkID) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{3}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ENRForkID) GetCurrentForkDigest() []byte {
|
||||
@@ -329,7 +242,7 @@ type MetaDataV0 struct {
|
||||
func (x *MetaDataV0) Reset() {
|
||||
*x = MetaDataV0{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[4]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -342,7 +255,7 @@ func (x *MetaDataV0) String() string {
|
||||
func (*MetaDataV0) ProtoMessage() {}
|
||||
|
||||
func (x *MetaDataV0) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[4]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -355,7 +268,7 @@ func (x *MetaDataV0) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use MetaDataV0.ProtoReflect.Descriptor instead.
|
||||
func (*MetaDataV0) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{4}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *MetaDataV0) GetSeqNumber() uint64 {
|
||||
@@ -385,7 +298,7 @@ type MetaDataV1 struct {
|
||||
func (x *MetaDataV1) Reset() {
|
||||
*x = MetaDataV1{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -398,7 +311,7 @@ func (x *MetaDataV1) String() string {
|
||||
func (*MetaDataV1) ProtoMessage() {}
|
||||
|
||||
func (x *MetaDataV1) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -411,7 +324,7 @@ func (x *MetaDataV1) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use MetaDataV1.ProtoReflect.Descriptor instead.
|
||||
func (*MetaDataV1) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{5}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *MetaDataV1) GetSeqNumber() uint64 {
|
||||
@@ -449,7 +362,7 @@ type MetaDataV2 struct {
|
||||
func (x *MetaDataV2) Reset() {
|
||||
*x = MetaDataV2{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -462,7 +375,7 @@ func (x *MetaDataV2) String() string {
|
||||
func (*MetaDataV2) ProtoMessage() {}
|
||||
|
||||
func (x *MetaDataV2) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -475,7 +388,7 @@ func (x *MetaDataV2) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use MetaDataV2.ProtoReflect.Descriptor instead.
|
||||
func (*MetaDataV2) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{6}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *MetaDataV2) GetSeqNumber() uint64 {
|
||||
@@ -518,7 +431,7 @@ type BlobSidecarsByRangeRequest struct {
|
||||
func (x *BlobSidecarsByRangeRequest) Reset() {
|
||||
*x = BlobSidecarsByRangeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -531,7 +444,7 @@ func (x *BlobSidecarsByRangeRequest) String() string {
|
||||
func (*BlobSidecarsByRangeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *BlobSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -544,7 +457,7 @@ func (x *BlobSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use BlobSidecarsByRangeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*BlobSidecarsByRangeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{7}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *BlobSidecarsByRangeRequest) GetStartSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
|
||||
@@ -574,7 +487,7 @@ type DataColumnSidecarsByRangeRequest struct {
|
||||
func (x *DataColumnSidecarsByRangeRequest) Reset() {
|
||||
*x = DataColumnSidecarsByRangeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[8]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -587,7 +500,7 @@ func (x *DataColumnSidecarsByRangeRequest) String() string {
|
||||
func (*DataColumnSidecarsByRangeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DataColumnSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[8]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -600,7 +513,7 @@ func (x *DataColumnSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DataColumnSidecarsByRangeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DataColumnSidecarsByRangeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{8}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *DataColumnSidecarsByRangeRequest) GetStartSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
|
||||
@@ -636,7 +549,7 @@ type LightClientUpdatesByRangeRequest struct {
|
||||
func (x *LightClientUpdatesByRangeRequest) Reset() {
|
||||
*x = LightClientUpdatesByRangeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[9]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -649,7 +562,7 @@ func (x *LightClientUpdatesByRangeRequest) String() string {
|
||||
func (*LightClientUpdatesByRangeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *LightClientUpdatesByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[9]
|
||||
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -662,7 +575,7 @@ func (x *LightClientUpdatesByRangeRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use LightClientUpdatesByRangeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*LightClientUpdatesByRangeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{9}
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *LightClientUpdatesByRangeRequest) GetStartPeriod() uint64 {
|
||||
@@ -711,38 +624,72 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{
|
||||
0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f,
|
||||
0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f,
|
||||
0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52,
|
||||
0x08, 0x68, 0x65, 0x61, 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0xd7, 0x03, 0x0a, 0x08, 0x53, 0x74,
|
||||
0x61, 0x74, 0x75, 0x73, 0x56, 0x32, 0x12, 0x26, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64,
|
||||
0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18,
|
||||
0x01, 0x34, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x2d,
|
||||
0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d,
|
||||
0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x6e, 0x0a,
|
||||
0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
|
||||
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
|
||||
0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x0e, 0x66,
|
||||
0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x23, 0x0a,
|
||||
0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f,
|
||||
0x6f, 0x74, 0x12, 0x61, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18,
|
||||
0x05, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x08, 0x68, 0x65, 0x61,
|
||||
0x64, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x7c, 0x0a, 0x17, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73,
|
||||
0x74, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x6c, 0x6f, 0x74,
|
||||
0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
|
||||
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
|
||||
0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x15, 0x65, 0x61,
|
||||
0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x53,
|
||||
0x6c, 0x6f, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x1a, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x08, 0x68, 0x65, 0x61, 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x1a, 0x42, 0x65,
|
||||
0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x63, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72,
|
||||
0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5,
|
||||
0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66,
|
||||
0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f,
|
||||
0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70,
|
||||
0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c,
|
||||
0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f,
|
||||
0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x4e, 0x52, 0x46,
|
||||
0x6f, 0x72, 0x6b, 0x49, 0x44, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
|
||||
0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x34, 0x52, 0x11, 0x63, 0x75, 0x72, 0x72, 0x65,
|
||||
0x6e, 0x74, 0x46, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x11,
|
||||
0x6e, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x34, 0x52, 0x0f,
|
||||
0x6e, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
|
||||
0x6d, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x65, 0x70, 0x6f,
|
||||
0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69,
|
||||
0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70,
|
||||
0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52,
|
||||
0x0d, 0x6e, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x80,
|
||||
0x01, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x30, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x09, 0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07,
|
||||
0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x39, 0x82,
|
||||
0xb5, 0x18, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62,
|
||||
0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f,
|
||||
0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74,
|
||||
0x73, 0x22, 0xd6, 0x01, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x31,
|
||||
0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
|
||||
0x53, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x39, 0x82, 0xb5, 0x18, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67,
|
||||
0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65,
|
||||
0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x07, 0x61, 0x74, 0x74,
|
||||
0x6e, 0x65, 0x74, 0x73, 0x12, 0x54, 0x0a, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x38, 0x82, 0xb5, 0x18, 0x2f, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63,
|
||||
0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64,
|
||||
0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x31,
|
||||
0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x86, 0x02, 0x0a, 0x0a, 0x4d,
|
||||
0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x32, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x71,
|
||||
0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73,
|
||||
0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x6e,
|
||||
0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x39, 0x82, 0xb5, 0x18, 0x30, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61,
|
||||
0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69,
|
||||
0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x8a,
|
||||
0xb5, 0x18, 0x01, 0x38, 0x52, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x54, 0x0a,
|
||||
0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42,
|
||||
0x38, 0x82, 0xb5, 0x18, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f,
|
||||
0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63,
|
||||
0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e,
|
||||
0x65, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x5f, 0x67,
|
||||
0x72, 0x6f, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f,
|
||||
0x75, 0x6e, 0x74, 0x22, 0x97, 0x01, 0x0a, 0x1a, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65,
|
||||
0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x63, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
|
||||
@@ -750,99 +697,36 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{
|
||||
0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
|
||||
0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74,
|
||||
0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x74, 0x65,
|
||||
0x70, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x4e, 0x52, 0x46, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x12,
|
||||
0x35, 0x0a, 0x13, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x5f,
|
||||
0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5,
|
||||
0x18, 0x01, 0x34, 0x52, 0x11, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x46, 0x6f, 0x72, 0x6b,
|
||||
0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x66,
|
||||
0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x34, 0x52, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x46, 0x6f,
|
||||
0x72, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x6d, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
|
||||
0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73,
|
||||
0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69,
|
||||
0x76, 0x65, 0x73, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x46,
|
||||
0x6f, 0x72, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x80, 0x01, 0x0a, 0x0a, 0x4d, 0x65, 0x74,
|
||||
0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e,
|
||||
0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x71,
|
||||
0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74,
|
||||
0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x39, 0x82, 0xb5, 0x18, 0x30, 0x67, 0x69, 0x74,
|
||||
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69,
|
||||
0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c,
|
||||
0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18,
|
||||
0x01, 0x38, 0x52, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x22, 0xd6, 0x01, 0x0a, 0x0a,
|
||||
0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x31, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65,
|
||||
0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
|
||||
0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07, 0x61, 0x74, 0x74,
|
||||
0x6e, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x39, 0x82, 0xb5, 0x18, 0x30,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
|
||||
0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66,
|
||||
0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34,
|
||||
0x8a, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x54,
|
||||
0x0a, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x38, 0x82, 0xb5, 0x18, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67,
|
||||
0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65,
|
||||
0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63,
|
||||
0x6e, 0x65, 0x74, 0x73, 0x22, 0x86, 0x02, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74,
|
||||
0x61, 0x56, 0x32, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65,
|
||||
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62,
|
||||
0x65, 0x72, 0x12, 0x53, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x39, 0x82, 0xb5, 0x18, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62,
|
||||
0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69,
|
||||
0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x07,
|
||||
0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x54, 0x0a, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e,
|
||||
0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x38, 0x82, 0xb5, 0x18, 0x2f, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61,
|
||||
0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69,
|
||||
0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5,
|
||||
0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x2e, 0x0a,
|
||||
0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x63,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74,
|
||||
0x6f, 0x64, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x97, 0x01,
|
||||
0x0a, 0x1a, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79,
|
||||
0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x63, 0x0a, 0x0a,
|
||||
0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
|
||||
0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73,
|
||||
0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65,
|
||||
0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f,
|
||||
0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc0, 0x01, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61,
|
||||
0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79,
|
||||
0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x63, 0x0a, 0x0a,
|
||||
0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
|
||||
0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73,
|
||||
0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65,
|
||||
0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f,
|
||||
0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d,
|
||||
0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32,
|
||||
0x38, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x5b, 0x0a, 0x20, 0x4c, 0x69,
|
||||
0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73,
|
||||
0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21,
|
||||
0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f,
|
||||
0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
|
||||
0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e,
|
||||
0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c,
|
||||
0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc0, 0x01,
|
||||
0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65,
|
||||
0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x63, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
|
||||
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
|
||||
0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74,
|
||||
0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a,
|
||||
0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07,
|
||||
0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73,
|
||||
0x22, 0x5b, 0x0a, 0x20, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55,
|
||||
0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x65,
|
||||
0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72,
|
||||
0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x9a, 0x01,
|
||||
0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50,
|
||||
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
|
||||
0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63,
|
||||
0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76,
|
||||
0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68,
|
||||
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74,
|
||||
0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -857,18 +741,17 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP() []byte {
|
||||
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
|
||||
var file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_proto_prysm_v1alpha1_p2p_messages_proto_goTypes = []interface{}{
|
||||
(*Status)(nil), // 0: ethereum.eth.v1alpha1.Status
|
||||
(*StatusV2)(nil), // 1: ethereum.eth.v1alpha1.StatusV2
|
||||
(*BeaconBlocksByRangeRequest)(nil), // 2: ethereum.eth.v1alpha1.BeaconBlocksByRangeRequest
|
||||
(*ENRForkID)(nil), // 3: ethereum.eth.v1alpha1.ENRForkID
|
||||
(*MetaDataV0)(nil), // 4: ethereum.eth.v1alpha1.MetaDataV0
|
||||
(*MetaDataV1)(nil), // 5: ethereum.eth.v1alpha1.MetaDataV1
|
||||
(*MetaDataV2)(nil), // 6: ethereum.eth.v1alpha1.MetaDataV2
|
||||
(*BlobSidecarsByRangeRequest)(nil), // 7: ethereum.eth.v1alpha1.BlobSidecarsByRangeRequest
|
||||
(*DataColumnSidecarsByRangeRequest)(nil), // 8: ethereum.eth.v1alpha1.DataColumnSidecarsByRangeRequest
|
||||
(*LightClientUpdatesByRangeRequest)(nil), // 9: ethereum.eth.v1alpha1.LightClientUpdatesByRangeRequest
|
||||
(*BeaconBlocksByRangeRequest)(nil), // 1: ethereum.eth.v1alpha1.BeaconBlocksByRangeRequest
|
||||
(*ENRForkID)(nil), // 2: ethereum.eth.v1alpha1.ENRForkID
|
||||
(*MetaDataV0)(nil), // 3: ethereum.eth.v1alpha1.MetaDataV0
|
||||
(*MetaDataV1)(nil), // 4: ethereum.eth.v1alpha1.MetaDataV1
|
||||
(*MetaDataV2)(nil), // 5: ethereum.eth.v1alpha1.MetaDataV2
|
||||
(*BlobSidecarsByRangeRequest)(nil), // 6: ethereum.eth.v1alpha1.BlobSidecarsByRangeRequest
|
||||
(*DataColumnSidecarsByRangeRequest)(nil), // 7: ethereum.eth.v1alpha1.DataColumnSidecarsByRangeRequest
|
||||
(*LightClientUpdatesByRangeRequest)(nil), // 8: ethereum.eth.v1alpha1.LightClientUpdatesByRangeRequest
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_p2p_messages_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
@@ -897,18 +780,6 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StatusV2); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BeaconBlocksByRangeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -920,7 +791,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ENRForkID); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -932,7 +803,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MetaDataV0); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -944,7 +815,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MetaDataV1); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -956,7 +827,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MetaDataV2); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -968,7 +839,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BlobSidecarsByRangeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -980,7 +851,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DataColumnSidecarsByRangeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -992,7 +863,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*LightClientUpdatesByRangeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -1011,7 +882,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 10,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -26,24 +26,6 @@ message Status {
  ];
}

message StatusV2 {
  bytes fork_digest = 1 [(ethereum.eth.ext.ssz_size) = "4"];
  bytes finalized_root = 2 [(ethereum.eth.ext.ssz_size) = "32"];
  uint64 finalized_epoch = 3 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Epoch"
  ];
  bytes head_root = 4 [(ethereum.eth.ext.ssz_size) = "32"];
  uint64 head_slot = 5 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"
  ];
  uint64 earliest_available_slot = 6 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"
  ];
}

message BeaconBlocksByRangeRequest {
  uint64 start_slot = 1 [
    (ethereum.eth.ext.cast_type) =

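The ssz_size and cast_type options above are what drive the generated code earlier in this diff: ssz_size fixes the byte-slice lengths checked in MarshalSSZTo, and cast_type swaps the plain uint64 fields for the Slot and Epoch wrapper types. A minimal illustration of those wrapper types follows; the values are arbitrary and the snippet is not taken from the repository.

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

func main() {
	// Slot and Epoch are distinct uint64-based types, so they convert
	// explicitly to and from uint64, exactly as the generated SSZ code does.
	var headSlot primitives.Slot = 256
	var finalizedEpoch primitives.Epoch = 7
	fmt.Println(uint64(headSlot), uint64(finalizedEpoch))
}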
@@ -248,9 +248,6 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
		args = append(args,
			fmt.Sprintf("--%s=http://localhost:%d", flags.BeaconRESTApiProviderFlag.Name, beaconRestApiPort),
			fmt.Sprintf("--%s", features.EnableBeaconRESTApi.Name))
		if v.config.UseSSZOnly {
			args = append(args, fmt.Sprintf("--%s", features.SSZOnly.Name))
		}
	}

	// Only apply e2e flags to the current branch. New flags may not exist in previous release.

@@ -29,10 +29,6 @@ func TestEndToEnd_MinimalConfig_ValidatorRESTApi(t *testing.T) {
	e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithValidatorRESTApi()).run()
}

func TestEndToEnd_MinimalConfig_ValidatorRESTApi_SSZ(t *testing.T) {
	e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithValidatorRESTApi(), types.WithSSZOnly()).run()
}

func TestEndToEnd_ScenarioRun_EEOffline(t *testing.T) {
	t.Skip("TODO(#10242) Prysm is current unable to handle an offline e2e")
	cfg := types.InitForkCfg(version.Bellatrix, version.Deneb, params.E2ETestConfig())

@@ -51,12 +51,6 @@ func WithValidatorRESTApi() E2EConfigOpt {
	}
}

func WithSSZOnly() E2EConfigOpt {
	return func(cfg *E2EConfig) {
		cfg.UseSSZOnly = true
	}
}

func WithBuilder() E2EConfigOpt {
	return func(cfg *E2EConfig) {
		cfg.UseBuilder = true
@@ -76,7 +70,6 @@ type E2EConfig struct {
	UseFixedPeerIDs         bool
	UseValidatorCrossClient bool
	UseBeaconRestApi        bool
	UseSSZOnly              bool
	UseBuilder              bool
	EpochsToRun             uint64
	Seed                    int64

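The removed WithSSZOnly helper followed the same functional-options pattern as the remaining ones (WithValidatorRESTApi, WithBuilder): each option is a closure that flips a field on E2EConfig before the run starts. The self-contained sketch below shows that pattern with hypothetical names; it is not the actual e2e code.

package main

import "fmt"

// Config mirrors the shape of E2EConfig for illustration only.
type Config struct {
	UseBeaconRestApi bool
	EpochsToRun      uint64
}

// Opt follows the E2EConfigOpt style: a function that mutates the config.
type Opt func(*Config)

func WithBeaconRestApi() Opt {
	return func(c *Config) { c.UseBeaconRestApi = true }
}

func WithEpochs(n uint64) Opt {
	return func(c *Config) { c.EpochsToRun = n }
}

func newConfig(opts ...Opt) *Config {
	c := &Config{EpochsToRun: 10}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	cfg := newConfig(WithBeaconRestApi(), WithEpochs(14))
	fmt.Printf("%+v\n", cfg)
}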
@@ -87,7 +87,7 @@ func (acm *CLIManager) prepareBeaconClients(ctx context.Context) (*iface.Validat
		acm.beaconApiTimeout,
	)

	restHandler := beaconApi.NewBeaconApiRestHandler(
	restHandler := beaconApi.NewBeaconApiJsonRestHandler(
		http.Client{Timeout: acm.beaconApiTimeout},
		acm.beaconApiEndpoint,
	)

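The hunk above restores the NewBeaconApiJsonRestHandler constructor in the validator CLI wiring. A rough sketch of that wiring is shown below; the beacon-api import path, the endpoint, and the timeout are assumptions for illustration, only the two constructor calls come from this diff.

package main

import (
	"net/http"
	"time"

	beaconApi "github.com/OffchainLabs/prysm/v6/validator/client/beacon-api" // import path assumed
)

func main() {
	// Build the JSON REST handler, then hand it to the validator client.
	restHandler := beaconApi.NewBeaconApiJsonRestHandler(
		http.Client{Timeout: 30 * time.Second},
		"http://localhost:3500",
	)
	validatorClient := beaconApi.NewBeaconApiValidatorClient(restHandler)
	_ = validatorClient
}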
@@ -18,6 +18,7 @@ go_library(
        "genesis.go",
        "get_beacon_block.go",
        "index.go",
        "json_rest_handler.go",
        "log.go",
        "metrics.go",
        "prepare_beacon_proposer.go",
@@ -26,7 +27,6 @@ go_library(
        "propose_exit.go",
        "prysm_beacon_chain_client.go",
        "registration.go",
        "rest_handler_client.go",
        "state_validators.go",
        "status.go",
        "stream_blocks.go",
@@ -47,7 +47,6 @@ go_library(
        "//api/server/structs:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -92,6 +91,7 @@ go_test(
        "genesis_test.go",
        "get_beacon_block_test.go",
        "index_test.go",
        "json_rest_handler_test.go",
        "prepare_beacon_proposer_test.go",
        "propose_attestation_test.go",
        "propose_beacon_block_altair_test.go",
@@ -110,7 +110,6 @@ go_test(
        "propose_exit_test.go",
        "prysm_beacon_chain_client_test.go",
        "registration_test.go",
        "rest_handler_client_test.go",
        "state_validators_test.go",
        "status_test.go",
        "stream_blocks_test.go",
@@ -129,7 +128,6 @@ go_test(
        "//api/server/structs:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/rpc/eth/shared/testing:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
@@ -147,8 +145,6 @@ go_test(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_protobuf//ptypes/empty",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",

@@ -17,7 +17,7 @@ import (

type beaconApiChainClient struct {
	fallbackClient          iface.ChainClient
	jsonRestHandler         RestHandler
	jsonRestHandler         JsonRestHandler
	stateValidatorsProvider StateValidatorsProvider
}

@@ -333,7 +333,7 @@ func (c beaconApiChainClient) ValidatorParticipation(ctx context.Context, in *et
	return nil, errors.New("beaconApiChainClient.ValidatorParticipation is not implemented. To use a fallback client, pass a fallback client as the last argument of NewBeaconApiChainClientWithFallback.")
}

func NewBeaconApiChainClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.ChainClient) iface.ChainClient {
func NewBeaconApiChainClientWithFallback(jsonRestHandler JsonRestHandler, fallbackClient iface.ChainClient) iface.ChainClient {
	return &beaconApiChainClient{
		jsonRestHandler: jsonRestHandler,
		fallbackClient:  fallbackClient,

@@ -20,7 +20,7 @@ var (

type beaconApiNodeClient struct {
	fallbackClient  iface.NodeClient
	jsonRestHandler RestHandler
	jsonRestHandler JsonRestHandler
	genesisProvider GenesisProvider
	healthTracker   health.Tracker
}
@@ -111,7 +111,7 @@ func (c *beaconApiNodeClient) HealthTracker() health.Tracker {
	return c.healthTracker
}

func NewNodeClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
func NewNodeClientWithFallback(jsonRestHandler JsonRestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
	b := &beaconApiNodeClient{
		jsonRestHandler: jsonRestHandler,
		fallbackClient:  fallbackClient,

@@ -22,13 +22,13 @@ type beaconApiValidatorClient struct {
	genesisProvider         GenesisProvider
	dutiesProvider          dutiesProvider
	stateValidatorsProvider StateValidatorsProvider
	jsonRestHandler         RestHandler
	jsonRestHandler         JsonRestHandler
	beaconBlockConverter    BeaconBlockConverter
	prysmChainClient        iface.PrysmChainClient
	isEventStreamRunning    bool
}

func NewBeaconApiValidatorClient(jsonRestHandler RestHandler, opts ...ValidatorClientOpt) iface.ValidatorClient {
func NewBeaconApiValidatorClient(jsonRestHandler JsonRestHandler, opts ...ValidatorClientOpt) iface.ValidatorClient {
	c := &beaconApiValidatorClient{
		genesisProvider: &beaconApiGenesisProvider{jsonRestHandler: jsonRestHandler},
		dutiesProvider:  beaconApiDutiesProvider{jsonRestHandler: jsonRestHandler},

@@ -27,7 +27,7 @@ type dutiesProvider interface {
}

type beaconApiDutiesProvider struct {
	jsonRestHandler RestHandler
	jsonRestHandler JsonRestHandler
}

type attesterDuty struct {

@@ -20,7 +20,7 @@ type GenesisProvider interface {
}

type beaconApiGenesisProvider struct {
	jsonRestHandler RestHandler
	jsonRestHandler JsonRestHandler
	genesis         *structs.Genesis
	once            sync.Once
}

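Every provider renamed above holds the same JsonRestHandler and uses it to decode JSON endpoints straight into api/server/structs types. The minimal sketch below shows that call pattern; the response struct name and the endpoint path are assumptions for illustration, while the Get(ctx, url, resp) shape matches the call visible later in this diff.

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	beaconApi "github.com/OffchainLabs/prysm/v6/validator/client/beacon-api" // import path assumed
)

// fetchGenesis sketches the provider pattern: Get decodes the JSON body of the
// endpoint into the supplied struct pointer. GetGenesisResponse is assumed here.
func fetchGenesis(ctx context.Context, h beaconApi.JsonRestHandler) (*structs.GetGenesisResponse, error) {
	resp := &structs.GetGenesisResponse{}
	if err := h.Get(ctx, "/eth/v1/beacon/genesis", resp); err != nil {
		return nil, err
	}
	return resp, nil
}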
@@ -6,10 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
neturl "net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -25,224 +22,160 @@ func (c *beaconApiValidatorClient) beaconBlock(ctx context.Context, slot primiti
|
||||
if len(graffiti) > 0 {
|
||||
queryParams.Add("graffiti", hexutil.Encode(graffiti))
|
||||
}
|
||||
|
||||
queryUrl := apiutil.BuildURL(fmt.Sprintf("/eth/v3/validator/blocks/%d", slot), queryParams)
|
||||
data, header, err := c.jsonRestHandler.GetSSZ(ctx, queryUrl)
|
||||
produceBlockV3ResponseJson := structs.ProduceBlockV3Response{}
|
||||
err := c.jsonRestHandler.Get(ctx, queryUrl, &produceBlockV3ResponseJson)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if strings.Contains(header.Get("Content-Type"), api.OctetStreamMediaType) {
|
||||
ver, err := version.FromString(header.Get(api.VersionHeader))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", header.Get(api.VersionHeader)))
|
||||
}
|
||||
isBlindedRaw := header.Get(api.ExecutionPayloadBlindedHeader)
|
||||
isBlinded, err := strconv.ParseBool(isBlindedRaw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return processBlockSSZResponse(ver, data, isBlinded)
|
||||
} else {
|
||||
decoder := json.NewDecoder(bytes.NewBuffer(data))
|
||||
produceBlockV3ResponseJson := structs.ProduceBlockV3Response{}
|
||||
if err = decoder.Decode(&produceBlockV3ResponseJson); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to decode response body into json for %s", queryUrl)
|
||||
}
|
||||
return processBlockJSONResponse(
|
||||
produceBlockV3ResponseJson.Version,
|
||||
produceBlockV3ResponseJson.ExecutionPayloadBlinded,
|
||||
json.NewDecoder(bytes.NewReader(produceBlockV3ResponseJson.Data)),
|
||||
)
|
||||
}
|
||||
|
||||
return processBlockResponse(
|
||||
produceBlockV3ResponseJson.Version,
|
||||
produceBlockV3ResponseJson.ExecutionPayloadBlinded,
|
||||
json.NewDecoder(bytes.NewReader(produceBlockV3ResponseJson.Data)),
|
||||
)
|
||||
}
|
||||
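
For context, the call above decodes the response in two stages: the version envelope first, then the fork-specific payload once the version is known. A minimal, self-contained sketch of that pattern follows; the envelope and payload types are illustrative stand-ins, assuming the Data field carries raw JSON in the style of json.RawMessage.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// envelope mirrors the shape relied on above: version metadata plus a raw
// payload that is decoded a second time once the fork version is known.
type envelope struct {
	Version string          `json:"version"`
	Data    json.RawMessage `json:"data"`
}

// denebPayload is a stand-in for a fork-specific block struct.
type denebPayload struct {
	Slot string `json:"slot"`
}

func main() {
	raw := []byte(`{"version":"deneb","data":{"slot":"123"}}`)

	// First stage: decode only the envelope.
	var env envelope
	if err := json.Unmarshal(raw, &env); err != nil {
		panic(err)
	}

	// Second stage: choose the concrete type from env.Version, then decode
	// the raw payload into it, as the per-fork block processing does.
	var blk denebPayload
	if err := json.NewDecoder(bytes.NewReader(env.Data)).Decode(&blk); err != nil {
		panic(err)
	}
	fmt.Println(env.Version, blk.Slot) // prints: deneb 123
}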

func processBlockSSZResponse(ver int, data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if ver >= version.Fulu {
return processBlockSSZResponseFulu(data, isBlinded)
}
if ver >= version.Electra {
return processBlockSSZResponseElectra(data, isBlinded)
}
if ver >= version.Deneb {
return processBlockSSZResponseDeneb(data, isBlinded)
}
if ver >= version.Capella {
return processBlockSSZResponseCapella(data, isBlinded)
}
if ver >= version.Bellatrix {
return processBlockSSZResponseBellatrix(data, isBlinded)
}
if ver >= version.Altair {
block := &ethpb.BeaconBlockAltair{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Altair{Altair: block}}, nil
}
if ver >= version.Phase0 {
block := &ethpb.BeaconBlock{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: block}}, nil
}
return nil, fmt.Errorf("unsupported block version %s", version.String(ver))
}

func processBlockSSZResponseFulu(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
blindedBlock := &ethpb.BlindedBeaconBlockFulu{}
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blindedBlock}, IsBlinded: true}, nil
}
block := &ethpb.BeaconBlockContentsFulu{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Fulu{Fulu: block}}, nil
}

func processBlockSSZResponseElectra(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
blindedBlock := &ethpb.BlindedBeaconBlockElectra{}
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedElectra{BlindedElectra: blindedBlock}, IsBlinded: true}, nil
}
block := &ethpb.BeaconBlockContentsElectra{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Electra{Electra: block}}, nil
}

func processBlockSSZResponseDeneb(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
blindedBlock := &ethpb.BlindedBeaconBlockDeneb{}
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock}, IsBlinded: true}, nil
}
block := &ethpb.BeaconBlockContentsDeneb{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: block}}, nil
}

func processBlockSSZResponseCapella(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
blindedBlock := &ethpb.BlindedBeaconBlockCapella{}
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: blindedBlock}, IsBlinded: true}, nil
}
block := &ethpb.BeaconBlockCapella{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: block}}, nil
}

func processBlockSSZResponseBellatrix(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
blindedBlock := &ethpb.BlindedBeaconBlockBellatrix{}
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: blindedBlock}, IsBlinded: true}, nil
}
block := &ethpb.BeaconBlockBellatrix{}
if err := block.UnmarshalSSZ(data); err != nil {
return nil, err
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: block}}, nil
}

func convertBlockToGeneric(decoder *json.Decoder, dest ethpb.GenericConverter, version string, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
typeName := version
if isBlinded {
typeName = "blinded " + typeName
}

if err := decoder.Decode(dest); err != nil {
return nil, errors.Wrapf(err, "failed to decode %s block response json", typeName)
}

genericBlock, err := dest.ToGeneric()
if err != nil {
return nil, errors.Wrapf(err, "failed to convert %s block", typeName)
}
return genericBlock, nil
}

func processBlockJSONResponse(ver string, isBlinded bool, decoder *json.Decoder) (*ethpb.GenericBeaconBlock, error) {
// nolint: gocognit
func processBlockResponse(ver string, isBlinded bool, decoder *json.Decoder) (*ethpb.GenericBeaconBlock, error) {
var response *ethpb.GenericBeaconBlock
if decoder == nil {
return nil, errors.New("no produce block json decoder found")
}

switch ver {
case version.String(version.Phase0):
return convertBlockToGeneric(decoder, &structs.BeaconBlock{}, version.String(version.Phase0), false)

jsonPhase0Block := structs.BeaconBlock{}
if err := decoder.Decode(&jsonPhase0Block); err != nil {
return nil, errors.Wrap(err, "failed to decode phase0 block response json")
}
genericBlock, err := jsonPhase0Block.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get phase0 block")
}
response = genericBlock
case version.String(version.Altair):
return convertBlockToGeneric(decoder, &structs.BeaconBlockAltair{}, "altair", false)

jsonAltairBlock := structs.BeaconBlockAltair{}
if err := decoder.Decode(&jsonAltairBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode altair block response json")
}
genericBlock, err := jsonAltairBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get altair block")
}
response = genericBlock
case version.String(version.Bellatrix):
return processBellatrixBlock(decoder, isBlinded)

if isBlinded {
jsonBellatrixBlock := structs.BlindedBeaconBlockBellatrix{}
if err := decoder.Decode(&jsonBellatrixBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode blinded bellatrix block response json")
}
genericBlock, err := jsonBellatrixBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get blinded bellatrix block")
}
response = genericBlock
} else {
jsonBellatrixBlock := structs.BeaconBlockBellatrix{}
if err := decoder.Decode(&jsonBellatrixBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode bellatrix block response json")
}
genericBlock, err := jsonBellatrixBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get bellatrix block")
}
response = genericBlock
}
case version.String(version.Capella):
return processCapellaBlock(decoder, isBlinded)

if isBlinded {
jsonCapellaBlock := structs.BlindedBeaconBlockCapella{}
if err := decoder.Decode(&jsonCapellaBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode blinded capella block response json")
}
genericBlock, err := jsonCapellaBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get blinded capella block")
}
response = genericBlock
} else {
jsonCapellaBlock := structs.BeaconBlockCapella{}
if err := decoder.Decode(&jsonCapellaBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode capella block response json")
}
genericBlock, err := jsonCapellaBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get capella block")
}
response = genericBlock
}
case version.String(version.Deneb):
return processDenebBlock(decoder, isBlinded)

if isBlinded {
jsonDenebBlock := structs.BlindedBeaconBlockDeneb{}
if err := decoder.Decode(&jsonDenebBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode blinded deneb block response json")
}
genericBlock, err := jsonDenebBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get blinded deneb block")
}
response = genericBlock
} else {
jsonDenebBlockContents := structs.BeaconBlockContentsDeneb{}
if err := decoder.Decode(&jsonDenebBlockContents); err != nil {
return nil, errors.Wrap(err, "failed to decode deneb block response json")
}
genericBlock, err := jsonDenebBlockContents.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get deneb block")
}
response = genericBlock
}
case version.String(version.Electra):
return processElectraBlock(decoder, isBlinded)

if isBlinded {
jsonElectraBlock := structs.BlindedBeaconBlockElectra{}
if err := decoder.Decode(&jsonElectraBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode blinded electra block response json")
}
genericBlock, err := jsonElectraBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get blinded electra block")
}
response = genericBlock
} else {
jsonElectraBlockContents := structs.BeaconBlockContentsElectra{}
if err := decoder.Decode(&jsonElectraBlockContents); err != nil {
return nil, errors.Wrap(err, "failed to decode electra block response json")
}
genericBlock, err := jsonElectraBlockContents.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get electra block")
}
response = genericBlock
}
case version.String(version.Fulu):
return processFuluBlock(decoder, isBlinded)

if isBlinded {
jsonFuluBlock := structs.BlindedBeaconBlockFulu{}
if err := decoder.Decode(&jsonFuluBlock); err != nil {
return nil, errors.Wrap(err, "failed to decode blinded fulu block response json")
}
genericBlock, err := jsonFuluBlock.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get blinded fulu block")
}
response = genericBlock
} else {
jsonFuluBlockContents := structs.BeaconBlockContentsFulu{}
if err := decoder.Decode(&jsonFuluBlockContents); err != nil {
return nil, errors.Wrap(err, "failed to decode fulu block response json")
}
genericBlock, err := jsonFuluBlockContents.ToGeneric()
if err != nil {
return nil, errors.Wrap(err, "failed to get fulu block")
}
response = genericBlock
}
default:
return nil, errors.Errorf("unsupported consensus version `%s`", ver)
}
}

func processBellatrixBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockBellatrix{}, "bellatrix", true)
}
return convertBlockToGeneric(decoder, &structs.BeaconBlockBellatrix{}, "bellatrix", false)
}

func processCapellaBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockCapella{}, "capella", true)
}
return convertBlockToGeneric(decoder, &structs.BeaconBlockCapella{}, "capella", false)
}

func processDenebBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockDeneb{}, "deneb", true)
}
return convertBlockToGeneric(decoder, &structs.BeaconBlockContentsDeneb{}, "deneb", false)
}

func processElectraBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockElectra{}, "electra", true)
}
return convertBlockToGeneric(decoder, &structs.BeaconBlockContentsElectra{}, "electra", false)
}

func processFuluBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
if isBlinded {
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockFulu{}, "fulu", true)
}
return convertBlockToGeneric(decoder, &structs.BeaconBlockContentsFulu{}, "fulu", false)
return response, nil
}

File diff suppressed because it is too large
@@ -4,53 +4,49 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"

"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

type RestHandler interface {
type JsonRestHandler interface {
Get(ctx context.Context, endpoint string, resp interface{}) error
GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp interface{}) error
HttpClient() *http.Client
Host() string
SetHost(host string)
}

type BeaconApiRestHandler struct {
type BeaconApiJsonRestHandler struct {
client http.Client
host string
}

// NewBeaconApiRestHandler returns a RestHandler
func NewBeaconApiRestHandler(client http.Client, host string) RestHandler {
return &BeaconApiRestHandler{
// NewBeaconApiJsonRestHandler returns a JsonRestHandler
func NewBeaconApiJsonRestHandler(client http.Client, host string) JsonRestHandler {
return &BeaconApiJsonRestHandler{
client: client,
host: host,
}
}
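
As a usage sketch only (the endpoint path, host, and import alias are illustrative assumptions, not taken from this diff), the renamed constructor and Get can be wired together like this:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	beaconApi "github.com/OffchainLabs/prysm/v6/validator/client/beacon-api" // assumed package path
)

func main() {
	// Build the handler against a local beacon node; host and timeout are placeholders.
	handler := beaconApi.NewBeaconApiJsonRestHandler(
		http.Client{Timeout: 5 * time.Second},
		"http://localhost:3500",
	)

	// Get decodes the JSON response body into the supplied struct; per the doc
	// comment below, an HTTP error is decoded as a DefaultJsonError and returned.
	resp := &structs.GetGenesisResponse{}
	if err := handler.Get(context.Background(), "/eth/v1/beacon/genesis", resp); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("genesis time:", resp.Data.GenesisTime)
}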

// HttpClient returns the underlying HTTP client of the handler
func (c *BeaconApiRestHandler) HttpClient() *http.Client {
func (c *BeaconApiJsonRestHandler) HttpClient() *http.Client {
return &c.client
}

// Host returns the underlying HTTP host
func (c *BeaconApiRestHandler) Host() string {
func (c *BeaconApiJsonRestHandler) Host() string {
return c.host
}

// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp interface{}) error {
func (c *BeaconApiJsonRestHandler) Get(ctx context.Context, endpoint string, resp interface{}) error {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -70,61 +66,9 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp in
return decodeResp(httpResp, resp)
}

func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to create request for endpoint %s", url)
}
primaryAcceptType := fmt.Sprintf("%s;q=%s", api.OctetStreamMediaType, "0.95")
secondaryAcceptType := fmt.Sprintf("%s;q=%s", api.JsonMediaType, "0.9")
acceptHeaderString := fmt.Sprintf("%s,%s", primaryAcceptType, secondaryAcceptType)
if features.Get().SSZOnly {
acceptHeaderString = api.OctetStreamMediaType
}
req.Header.Set("Accept", acceptHeaderString)
httpResp, err := c.client.Do(req)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to perform request for endpoint %s", url)
}
defer func() {
if err := httpResp.Body.Close(); err != nil {
return
}
}()
contentType := httpResp.Header.Get("Content-Type")
body, err := io.ReadAll(httpResp.Body)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to read response body for %s", httpResp.Request.URL)
}
if !strings.Contains(primaryAcceptType, contentType) {
log.WithFields(logrus.Fields{
"primaryAcceptType": primaryAcceptType,
"secondaryAcceptType": secondaryAcceptType,
"receivedAcceptType": contentType,
}).Debug("Server responded with non primary accept type")
}

// non-2XX codes are a failure
if !strings.HasPrefix(httpResp.Status, "2") {
decoder := json.NewDecoder(bytes.NewBuffer(body))
errorJson := &httputil.DefaultJsonError{}
if err = decoder.Decode(errorJson); err != nil {
return nil, nil, errors.Wrapf(err, "failed to decode response body into error json for %s", httpResp.Request.URL)
}
return nil, nil, errorJson
}

if features.Get().SSZOnly && contentType != api.OctetStreamMediaType {
return nil, nil, errors.Errorf("server responded with non primary accept type %s", contentType)
}

return body, httpResp.Header, nil
}
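
For reference, assuming the media-type constants carry their conventional values (an assumption, not stated in this diff), the Accept header assembled by GetSSZ above comes out as in this sketch:

package main

import "fmt"

func main() {
	// Assumed values of api.OctetStreamMediaType and api.JsonMediaType.
	const octetStream = "application/octet-stream"
	const jsonMedia = "application/json"

	primary := fmt.Sprintf("%s;q=%s", octetStream, "0.95")
	secondary := fmt.Sprintf("%s;q=%s", jsonMedia, "0.9")

	// SSZ is preferred (q=0.95) and JSON is the fallback (q=0.9).
	fmt.Println(primary + "," + secondary)
	// Output: application/octet-stream;q=0.95,application/json;q=0.9
}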

// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Post(
func (c *BeaconApiJsonRestHandler) Post(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -192,6 +136,6 @@ func decodeResp(httpResp *http.Response, resp interface{}) error {
return nil
}

func (c *BeaconApiRestHandler) SetHost(host string) {
func (c *BeaconApiJsonRestHandler) SetHost(host string) {
c.host = host
}
@@ -2,7 +2,6 @@ package beacon_api

import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
@@ -16,8 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
)

func TestGet(t *testing.T) {
@@ -42,7 +39,7 @@ func TestGet(t *testing.T) {
server := httptest.NewServer(mux)
defer server.Close()

jsonRestHandler := BeaconApiRestHandler{
jsonRestHandler := BeaconApiJsonRestHandler{
client: http.Client{Timeout: time.Second * 5},
host: server.URL,
}
@@ -51,98 +48,6 @@ func TestGet(t *testing.T) {
assert.DeepEqual(t, genesisJson, resp)
}

func TestGetSSZ(t *testing.T) {
ctx := context.Background()
const endpoint = "/example/rest/api/ssz"
genesisJson := &structs.GetGenesisResponse{
Data: &structs.Genesis{
GenesisTime: "123",
GenesisValidatorsRoot: "0x456",
GenesisForkVersion: "0x789",
},
}

t.Run("Successful SSZ response", func(t *testing.T) {
expectedBody := []byte{10, 20, 30, 40}

mux := http.NewServeMux()
mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
assert.StringContains(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
_, err := w.Write(expectedBody)
require.NoError(t, err)
})
server := httptest.NewServer(mux)
defer server.Close()

jsonRestHandler := BeaconApiRestHandler{
client: http.Client{Timeout: time.Second * 5},
host: server.URL,
}

body, header, err := jsonRestHandler.GetSSZ(ctx, endpoint)
require.NoError(t, err)
assert.DeepEqual(t, expectedBody, body)
require.StringContains(t, api.OctetStreamMediaType, header.Get("Content-Type"))
})

t.Run("Json Content-Type response", func(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
defer logrus.SetLevel(logrus.InfoLevel) // reset it afterwards
logHook := test.NewGlobal()
mux := http.NewServeMux()
mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
assert.StringContains(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
w.Header().Set("Content-Type", api.JsonMediaType)

marshalledJson, err := json.Marshal(genesisJson)
require.NoError(t, err)

_, err = w.Write(marshalledJson)
require.NoError(t, err)
})
server := httptest.NewServer(mux)
defer server.Close()

jsonRestHandler := BeaconApiRestHandler{
client: http.Client{Timeout: time.Second * 5},
host: server.URL,
}

body, header, err := jsonRestHandler.GetSSZ(ctx, endpoint)
require.NoError(t, err)
assert.LogsContain(t, logHook, "Server responded with non primary accept type")
require.Equal(t, api.JsonMediaType, header.Get("Content-Type"))
resp := &structs.GetGenesisResponse{}
require.NoError(t, json.Unmarshal(body, resp))
require.Equal(t, "123", resp.Data.GenesisTime)
})

t.Run("Wrong Content-Type response, doesn't error out and instead handled downstream", func(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
defer logrus.SetLevel(logrus.InfoLevel) // reset it afterwards
logHook := test.NewGlobal()
mux := http.NewServeMux()
mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
assert.StringContains(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
w.Header().Set("Content-Type", "text/plain") // Invalid content type
_, err := w.Write([]byte("some text"))
require.NoError(t, err)
})
server := httptest.NewServer(mux)
defer server.Close()

jsonRestHandler := BeaconApiRestHandler{
client: http.Client{Timeout: time.Second * 5},
host: server.URL,
}

_, _, err := jsonRestHandler.GetSSZ(ctx, endpoint)
require.NoError(t, err)
assert.LogsContain(t, logHook, "Server responded with non primary accept type")
})
}

func TestPost(t *testing.T) {
ctx := t.Context()
const endpoint = "/example/rest/api/endpoint"
@@ -180,7 +85,7 @@ func TestPost(t *testing.T) {
server := httptest.NewServer(mux)
defer server.Close()

jsonRestHandler := BeaconApiRestHandler{
jsonRestHandler := BeaconApiJsonRestHandler{
client: http.Client{Timeout: time.Second * 5},
host: server.URL,
}
@@ -56,23 +56,6 @@ func (mr *MockJsonRestHandlerMockRecorder) Get(ctx, endpoint, resp any) *gomock.
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockJsonRestHandler)(nil).Get), ctx, endpoint, resp)
}


// GetSSZ mocks base method.
func (m *MockJsonRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSSZ", ctx, endpoint)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(http.Header)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}

// GetSSZ indicates an expected call of GetSSZ.
func (mr *MockJsonRestHandlerMockRecorder) GetSSZ(ctx, endpoint any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSZ", reflect.TypeOf((*MockJsonRestHandler)(nil).GetSSZ), ctx, endpoint)
}

// Host mocks base method.
func (m *MockJsonRestHandler) Host() string {
m.ctrl.T.Helper()

@@ -18,7 +18,7 @@ import (
)

// NewPrysmChainClient returns implementation of iface.PrysmChainClient.
func NewPrysmChainClient(jsonRestHandler RestHandler, nodeClient iface.NodeClient) iface.PrysmChainClient {
func NewPrysmChainClient(jsonRestHandler JsonRestHandler, nodeClient iface.NodeClient) iface.PrysmChainClient {
return prysmChainClient{
jsonRestHandler: jsonRestHandler,
nodeClient: nodeClient,
@@ -26,7 +26,7 @@ func NewPrysmChainClient(jsonRestHandler RestHandler, nodeClient iface.NodeClien
}

type prysmChainClient struct {
jsonRestHandler RestHandler
jsonRestHandler JsonRestHandler
nodeClient iface.NodeClient
}


@@ -21,7 +21,7 @@ type StateValidatorsProvider interface {
}

type beaconApiStateValidatorsProvider struct {
jsonRestHandler RestHandler
jsonRestHandler JsonRestHandler
}

func (c beaconApiStateValidatorsProvider) StateValidators(

@@ -9,7 +9,7 @@ import (
validatorHelpers "github.com/OffchainLabs/prysm/v6/validator/helpers"
)

func NewChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.ChainClient {
func NewChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.JsonRestHandler) iface.ChainClient {
grpcClient := grpcApi.NewGrpcChainClient(validatorConn.GetGrpcClientConn())
if features.Get().EnableBeaconRESTApi {
return beaconApi.NewBeaconApiChainClientWithFallback(jsonRestHandler, grpcClient)
@@ -18,7 +18,7 @@ func NewChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandl
}
}

func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.PrysmChainClient {
func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.JsonRestHandler) iface.PrysmChainClient {
if features.Get().EnableBeaconRESTApi {
return beaconApi.NewPrysmChainClient(jsonRestHandler, nodeClientFactory.NewNodeClient(validatorConn, jsonRestHandler))
} else {

@@ -8,7 +8,7 @@ import (
validatorHelpers "github.com/OffchainLabs/prysm/v6/validator/helpers"
)

func NewNodeClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.NodeClient {
func NewNodeClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.JsonRestHandler) iface.NodeClient {
grpcClient := grpcApi.NewNodeClient(validatorConn.GetGrpcClientConn())
if features.Get().EnableBeaconRESTApi {
return beaconApi.NewNodeClientWithFallback(jsonRestHandler, grpcClient)

@@ -179,7 +179,7 @@ func (v *ValidatorService) Start() {
return
}

restHandler := beaconApi.NewBeaconApiRestHandler(
restHandler := beaconApi.NewBeaconApiJsonRestHandler(
http.Client{Timeout: v.conn.GetBeaconApiTimeout(), Transport: otelhttp.NewTransport(http.DefaultTransport)},
hosts[0],
)

@@ -10,7 +10,7 @@ import (

func NewValidatorClient(
validatorConn validatorHelpers.NodeConnection,
jsonRestHandler beaconApi.RestHandler,
jsonRestHandler beaconApi.JsonRestHandler,
opt ...beaconApi.ValidatorClientOpt,
) iface.ValidatorClient {
if features.Get().EnableBeaconRESTApi {

@@ -55,7 +55,7 @@ func (s *Server) registerBeaconClient() error {
s.beaconApiTimeout,
)

restHandler := beaconApi.NewBeaconApiRestHandler(
restHandler := beaconApi.NewBeaconApiJsonRestHandler(
http.Client{Timeout: s.beaconApiTimeout, Transport: otelhttp.NewTransport(http.DefaultTransport)},
s.beaconApiEndpoint,
)