Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: rose-devel...develop2 (23 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 16dcc4bf5e | |
| | 46afec9cd2 | |
| | 2d2507b907 | |
| | a701f07f3a | |
| | f4bbe5ca40 | |
| | 4be8de2476 | |
| | fac509a3e6 | |
| | b1ac8209b2 | |
| | 74c9586c66 | |
| | f0ad3dfaeb | |
| | 2540196747 | |
| | f133751cce | |
| | bddcc158e4 | |
| | bc7664321b | |
| | 97f416b3a7 | |
| | 1c1e0f38bb | |
| | 121914d0d7 | |
| | e8625cd89d | |
| | 667aaf1564 | |
| | e020907d2a | |
| | 9927cea35a | |
| | d4233471d2 | |
| | d63ae69920 | |
@@ -9,6 +9,7 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
     visibility = ["//visibility:public"],
     deps = [
+        "//api:go_default_library",
         "@com_github_rs_cors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
     ],
@@ -3,10 +3,10 @@ package middleware

 import (
 	"compress/gzip"
 	"fmt"
-	"io"
 	"net/http"
 	"strings"

+	"github.com/OffchainLabs/prysm/v6/api"
 	"github.com/rs/cors"
 	log "github.com/sirupsen/logrus"
 )

@@ -119,33 +119,40 @@ func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
 func AcceptEncodingHeaderHandler() Middleware {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") || strings.Contains(r.Header.Get("Accept"), "application/octet-stream") {
+			if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
 				next.ServeHTTP(w, r)
 				return
 			}

-			w.Header().Set("Content-Encoding", "gzip")
-
 			gz := gzip.NewWriter(w)
+			gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
 			defer func() {
+				if !gzipRW.zipped {
+					return
+				}
 				if err := gz.Close(); err != nil {
 					log.WithError(err).Error("Failed to close gzip writer")
 				}
 			}()

-			gzipRW := gzipResponseWriter{Writer: gz, ResponseWriter: w}
 			next.ServeHTTP(gzipRW, r)
 		})
 	}
 }

 type gzipResponseWriter struct {
-	io.Writer
+	gz *gzip.Writer
 	http.ResponseWriter
+	zipped bool
 }

-func (g gzipResponseWriter) Write(b []byte) (int, error) {
-	return g.Writer.Write(b)
+func (g *gzipResponseWriter) Write(b []byte) (int, error) {
+	if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
+		g.zipped = true
+		g.Header().Set("Content-Encoding", "gzip")
+		return g.gz.Write(b)
+	}
+	return g.ResponseWriter.Write(b)
 }

 func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
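A minimal sketch of exercising the reworked handler end to end (assumes it sits in the middleware package; the JSON body and recorder plumbing are illustrative, not part of the change):

package middleware

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Demonstrates the new split of responsibilities: the handler negotiates gzip
// via Accept-Encoding, but compression is only applied once Write observes a
// JSON Content-Type, so SSZ (octet-stream) responses pass through untouched.
func ExampleAcceptEncodingHeaderHandler() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json") // api.JsonMediaType
		_, _ = w.Write([]byte(`{"ok":true}`))
	})
	h := AcceptEncodingHeaderHandler()(inner)

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.Header.Set("Accept-Encoding", "gzip")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	fmt.Println(rec.Header().Get("Content-Encoding"))
	// Output: gzip
}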
@@ -2,16 +2,15 @@ package middleware

 import (
 	"bytes"
+	"compress/gzip"
+	"io"
 	"net/http"
 	"net/http/httptest"
 	"testing"

-	"compress/gzip"
-	"io"
-	log "github.com/sirupsen/logrus"
 	"github.com/OffchainLabs/prysm/v6/api"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
+
+	log "github.com/sirupsen/logrus"
 )

 func TestNormalizeQueryValuesHandler(t *testing.T) {

@@ -130,9 +129,9 @@ func TestContentTypeHandler(t *testing.T) {
 }

 func TestAcceptEncodingHeaderHandler(t *testing.T) {
-
 	dummyContent := "Test gzip middleware content"
 	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", r.Header.Get("Accept"))
 		_, err := w.Write([]byte(dummyContent))
 		require.NoError(t, err)
 	})

@@ -141,34 +140,46 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {

 	tests := []struct {
 		name             string
+		accept           string
 		acceptEncoding   string
 		expectCompressed bool
 	}{
 		{
 			name:             "Gzip supported",
+			accept:           api.JsonMediaType,
 			acceptEncoding:   "gzip",
 			expectCompressed: true,
 		},
 		{
 			name:             "Multiple encodings supported",
+			accept:           api.JsonMediaType,
 			acceptEncoding:   "deflate, gzip",
 			expectCompressed: true,
 		},
 		{
 			name:             "Gzip not supported",
+			accept:           api.JsonMediaType,
 			acceptEncoding:   "deflate",
 			expectCompressed: false,
 		},
 		{
 			name:             "No accept encoding header",
+			accept:           api.JsonMediaType,
 			acceptEncoding:   "",
 			expectCompressed: false,
 		},
+		{
+			name:             "SSZ",
+			accept:           api.OctetStreamMediaType,
+			acceptEncoding:   "gzip",
+			expectCompressed: false,
+		},
 	}

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			req := httptest.NewRequest("GET", "/", nil)
+			req.Header.Set("Accept", tt.accept)
 			if tt.acceptEncoding != "" {
 				req.Header.Set("Accept-Encoding", tt.acceptEncoding)
 			}
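The hunk cuts off before the assertions, which presumably decompress the body when expectCompressed is set; a sketch of what that verification step likely looks like inside the t.Run closure (names follow the test above; the exact assertions are a guess):

			rec := httptest.NewRecorder()
			AcceptEncodingHeaderHandler()(nextHandler).ServeHTTP(rec, req)

			res := rec.Result()
			if tt.expectCompressed {
				// Compressed path: the middleware must have set the header
				// and the body must gunzip back to the original content.
				require.Equal(t, "gzip", res.Header.Get("Content-Encoding"))
				gr, err := gzip.NewReader(res.Body)
				require.NoError(t, err)
				decompressed, err := io.ReadAll(gr)
				require.NoError(t, err)
				require.Equal(t, dummyContent, string(decompressed))
			} else {
				// Uncompressed path: the body is passed through verbatim.
				body, err := io.ReadAll(res.Body)
				require.NoError(t, err)
				require.Equal(t, dummyContent, string(body))
			}
			require.NoError(t, res.Body.Close())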
@@ -2796,7 +2796,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 		// create and save old update
-		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 		require.NoError(t, err)

 		err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)

@@ -2848,7 +2848,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 		// create and save old update
-		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 		require.NoError(t, err)

 		scb := make([]byte, 64)

@@ -2954,7 +2954,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 		// create and save old update
-		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 		require.NoError(t, err)

 		err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)

@@ -3006,7 +3006,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 		// create and save old update
-		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 		require.NoError(t, err)

 		scb := make([]byte, 64)

@@ -3112,7 +3112,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 		// create and save old update
-		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 		require.NoError(t, err)

 		err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)

@@ -3164,7 +3164,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 		period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

 		// create and save old update
-		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
+		oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
 		require.NoError(t, err)

 		scb := make([]byte, 64)

@@ -3647,7 +3647,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 			expectedVersion = version.Altair
 		case 2:
 			forkEpoch = uint64(params.BeaconConfig().BellatrixForkEpoch)
-			expectedVersion = version.Altair
+			expectedVersion = version.Bellatrix
 		case 3:
 			forkEpoch = uint64(params.BeaconConfig().CapellaForkEpoch)
 			expectedVersion = version.Capella

@@ -3656,7 +3656,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 			expectedVersion = version.Deneb
 		case 5:
 			forkEpoch = uint64(params.BeaconConfig().ElectraForkEpoch)
-			expectedVersion = version.Deneb
+			expectedVersion = version.Electra
 		default:
 			t.Errorf("Unsupported fork version %s", version.String(testVersion))
 		}

@@ -3801,7 +3801,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
 			expectedVersion = version.Altair
 		case 2:
 			forkEpoch = uint64(params.BeaconConfig().BellatrixForkEpoch)
-			expectedVersion = version.Altair
+			expectedVersion = version.Bellatrix
 		case 3:
 			forkEpoch = uint64(params.BeaconConfig().CapellaForkEpoch)
 			expectedVersion = version.Capella
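The one-line edits repeated through these test hunks track two changes in the light-client package (shown in full further down): CreateDefaultLightClientUpdate now takes the attested block instead of the current slot plus attested state, and the expectedVersion fixes align the tests with headers now following the block's own fork (Bellatrix, Electra) rather than an epoch-derived version. The call-site change in miniature:

// Before: the default update was shaped by wall-clock slot and the attested
// state's version.
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)

// After: it is shaped purely by the attested block's fork version.
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)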
@@ -38,11 +38,14 @@ const (
 	// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
 	SingleAttReceived = 9

+	// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
+	DataColumnSidecarReceived = 10
+
 	// BlockGossipReceived is sent after a block has been received from gossip or API that passes validation rules.
-	BlockGossipReceived = 10
+	BlockGossipReceived = 11

 	// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
-	DataColumnReceived = 11
+	DataColumnReceived = 12
 )

 // UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.

@@ -94,6 +97,11 @@ type SingleAttReceivedData struct {
 	Attestation ethpb.Att
 }

+// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.
+type DataColumnSidecarReceivedData struct {
+	DataColumn *blocks.VerifiedRODataColumn
+}
+
 // BlockGossipReceivedData is the data sent with BlockGossipReceived events.
 type BlockGossipReceivedData struct {
 	// SignedBlock is the block that was received.
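Note that inserting the new constant renumbers BlockGossipReceived and DataColumnReceived, so anything comparing or persisting the raw values needs rebuilding. A sketch of how a consumer might react to the new event; the constant and payload struct come from the hunk, while the feed.Event plumbing mirrors the ev.Type/ev.Data convention used by the other event types in this package and should be treated as illustrative:

// onOperationEvent is a hypothetical subscriber callback.
func onOperationEvent(ev *feed.Event) {
	if ev.Type == DataColumnSidecarReceived {
		data, ok := ev.Data.(*DataColumnSidecarReceivedData)
		if !ok {
			return // unexpected payload shape
		}
		_ = data.DataColumn // *blocks.VerifiedRODataColumn, already gossip-validated
	}
}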
@@ -403,7 +403,7 @@ func AssignmentForValidator(
 			}
 		}
 	}
-	return nil // validator is not scheduled this epoch
+	return &LiteAssignment{} // validator is not scheduled this epoch
 }

 // CommitteeAssignments calculates committee assignments for each validator during the specified epoch.

@@ -912,6 +912,7 @@ func TestAssignmentForValidator(t *testing.T) {
 			{{4, 5, 6}},
 		}
 		got = helpers.AssignmentForValidator(bySlot, start, primitives.ValidatorIndex(99))
-		require.IsNil(t, got)
+		// should be empty to be safe
+		require.DeepEqual(t, &helpers.LiteAssignment{}, got)
 	})
 }
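Call sites relying on the old nil sentinel must change with this; a sketch of the before/after caller pattern (the DeepEqual check mirrors the updated test above):

a := helpers.AssignmentForValidator(bySlot, start, valIdx)
// Before this change, a nil return meant "not scheduled this epoch":
//   if a == nil { ... }
// After it, an empty LiteAssignment is returned instead, so compare against
// the zero value (or check the specific fields you care about).
if reflect.DeepEqual(a, &helpers.LiteAssignment{}) {
	// validator is not scheduled this epoch
}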
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
+        "helpers.go",
         "lightclient.go",
         "store.go",
     ],

@@ -41,7 +42,6 @@ go_test(
         "//consensus-types:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/light-client:go_default_library",
-        "//consensus-types/primitives:go_default_library",
         "//encoding/ssz:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
beacon-chain/core/light-client/helpers.go (new file, 243 lines)
@@ -0,0 +1,243 @@
package light_client

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
	"google.golang.org/protobuf/proto"
)

func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.LightClientBootstrap, error) {
	currentEpoch := slots.ToEpoch(currentSlot)
	syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
	pubKeys := make([][]byte, syncCommitteeSize)
	for i := uint64(0); i < syncCommitteeSize; i++ {
		pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
	}
	currentSyncCommittee := &pb.SyncCommittee{
		Pubkeys:         pubKeys,
		AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
	}

	var currentSyncCommitteeBranch [][]byte
	if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
		currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
	} else {
		currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)
	}
	for i := 0; i < len(currentSyncCommitteeBranch); i++ {
		currentSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
	}

	executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
	for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
		executionBranch[i] = make([]byte, 32)
	}

	var m proto.Message
	if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
		m = &pb.LightClientBootstrapAltair{
			Header: &pb.LightClientHeaderAltair{
				Beacon: &pb.BeaconBlockHeader{},
			},
			CurrentSyncCommittee:       currentSyncCommittee,
			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
		}
	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
		m = &pb.LightClientBootstrapCapella{
			Header: &pb.LightClientHeaderCapella{
				Beacon:          &pb.BeaconBlockHeader{},
				Execution:       &enginev1.ExecutionPayloadHeaderCapella{},
				ExecutionBranch: executionBranch,
			},
			CurrentSyncCommittee:       currentSyncCommittee,
			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
		}
	} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
		m = &pb.LightClientBootstrapDeneb{
			Header: &pb.LightClientHeaderDeneb{
				Beacon:          &pb.BeaconBlockHeader{},
				Execution:       &enginev1.ExecutionPayloadHeaderDeneb{},
				ExecutionBranch: executionBranch,
			},
			CurrentSyncCommittee:       currentSyncCommittee,
			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
		}
	} else {
		m = &pb.LightClientBootstrapElectra{
			Header: &pb.LightClientHeaderDeneb{
				Beacon:          &pb.BeaconBlockHeader{},
				Execution:       &enginev1.ExecutionPayloadHeaderDeneb{},
				ExecutionBranch: executionBranch,
			},
			CurrentSyncCommittee:       currentSyncCommittee,
			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
		}
	}

	return light_client.NewWrappedBootstrap(m)
}

func makeExecutionAndProofDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*enginev1.ExecutionPayloadHeaderDeneb, [][]byte, error) {
	if blk.Version() < version.Capella {
		p, err := execution.EmptyExecutionPayloadHeader(version.Deneb)
		if err != nil {
			return nil, nil, errors.Wrap(err, "could not get payload header")
		}
		payloadHeader, ok := p.(*enginev1.ExecutionPayloadHeaderDeneb)
		if !ok {
			return nil, nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderDeneb{})
		}
		payloadProof := emptyPayloadProof()

		return payloadHeader, payloadProof, nil
	}

	payload, err := blk.Block().Body().Execution()
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get execution payload")
	}
	transactionsRoot, err := ComputeTransactionsRoot(payload)
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get transactions root")
	}
	withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get withdrawals root")
	}

	payloadHeader := &enginev1.ExecutionPayloadHeaderDeneb{
		ParentHash:       payload.ParentHash(),
		FeeRecipient:     payload.FeeRecipient(),
		StateRoot:        payload.StateRoot(),
		ReceiptsRoot:     payload.ReceiptsRoot(),
		LogsBloom:        payload.LogsBloom(),
		PrevRandao:       payload.PrevRandao(),
		BlockNumber:      payload.BlockNumber(),
		GasLimit:         payload.GasLimit(),
		GasUsed:          payload.GasUsed(),
		Timestamp:        payload.Timestamp(),
		ExtraData:        payload.ExtraData(),
		BaseFeePerGas:    payload.BaseFeePerGas(),
		BlockHash:        payload.BlockHash(),
		TransactionsRoot: transactionsRoot,
		WithdrawalsRoot:  withdrawalsRoot,
		BlobGasUsed:      0,
		ExcessBlobGas:    0,
	}

	if blk.Version() >= version.Deneb {
		blobGasUsed, err := payload.BlobGasUsed()
		if err != nil {
			return nil, nil, errors.Wrap(err, "could not get blob gas used")
		}
		excessBlobGas, err := payload.ExcessBlobGas()
		if err != nil {
			return nil, nil, errors.Wrap(err, "could not get excess blob gas")
		}

		payloadHeader.BlobGasUsed = blobGasUsed
		payloadHeader.ExcessBlobGas = excessBlobGas
	}

	payloadProof, err := blocks.PayloadProof(ctx, blk.Block())
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get execution payload proof")
	}

	return payloadHeader, payloadProof, nil
}

func makeExecutionAndProofCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*enginev1.ExecutionPayloadHeaderCapella, [][]byte, error) {
	if blk.Version() > version.Capella {
		return nil, nil, fmt.Errorf("unsupported block version %s for capella execution payload", version.String(blk.Version()))
	}
	if blk.Version() < version.Capella {
		p, err := execution.EmptyExecutionPayloadHeader(version.Capella)
		if err != nil {
			return nil, nil, errors.Wrap(err, "could not get payload header")
		}
		payloadHeader, ok := p.(*enginev1.ExecutionPayloadHeaderCapella)
		if !ok {
			return nil, nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderCapella{})
		}
		payloadProof := emptyPayloadProof()

		return payloadHeader, payloadProof, nil
	}

	payload, err := blk.Block().Body().Execution()
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get execution payload")
	}
	transactionsRoot, err := ComputeTransactionsRoot(payload)
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get transactions root")
	}
	withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get withdrawals root")
	}

	payloadHeader := &enginev1.ExecutionPayloadHeaderCapella{
		ParentHash:       payload.ParentHash(),
		FeeRecipient:     payload.FeeRecipient(),
		StateRoot:        payload.StateRoot(),
		ReceiptsRoot:     payload.ReceiptsRoot(),
		LogsBloom:        payload.LogsBloom(),
		PrevRandao:       payload.PrevRandao(),
		BlockNumber:      payload.BlockNumber(),
		GasLimit:         payload.GasLimit(),
		GasUsed:          payload.GasUsed(),
		Timestamp:        payload.Timestamp(),
		ExtraData:        payload.ExtraData(),
		BaseFeePerGas:    payload.BaseFeePerGas(),
		BlockHash:        payload.BlockHash(),
		TransactionsRoot: transactionsRoot,
		WithdrawalsRoot:  withdrawalsRoot,
	}

	payloadProof, err := blocks.PayloadProof(ctx, blk.Block())
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not get execution payload proof")
	}

	return payloadHeader, payloadProof, nil
}

func makeBeaconBlockHeader(blk interfaces.ReadOnlySignedBeaconBlock) (*pb.BeaconBlockHeader, error) {
	parentRoot := blk.Block().ParentRoot()
	stateRoot := blk.Block().StateRoot()
	bodyRoot, err := blk.Block().Body().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "could not get body root")
	}
	return &pb.BeaconBlockHeader{
		Slot:          blk.Block().Slot(),
		ProposerIndex: blk.Block().ProposerIndex(),
		ParentRoot:    parentRoot[:],
		StateRoot:     stateRoot[:],
		BodyRoot:      bodyRoot[:],
	}, nil
}

func emptyPayloadProof() [][]byte {
	branch := interfaces.LightClientExecutionBranch{}
	proof := make([][]byte, len(branch))
	for i, b := range branch {
		proof[i] = b[:]
	}
	return proof
}
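These helpers pull logic out of lightclient.go (diff below) and switch the gating from wall-clock epoch to the block's own fork version. A condensed contrast of the two styles, using conditions that appear in the diffs (the scenario in the comments is illustrative):

// Old style (epoch-gated): the header/payload flavor tracked the node's
// current epoch, e.g.
//   if currentEpoch < params.BeaconConfig().DenebForkEpoch { ... Capella shapes ... }
// New style (version-gated): it tracks the block being converted, so a
// pre-Capella finalized block handled during a later fork still gets an
// empty execution header and proof rather than a mismatched real one.
if blk.Version() < version.Capella {
	// empty payload header + emptyPayloadProof()
}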
@@ -6,12 +6,10 @@ import (
 	"fmt"
 	"reflect"

-	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"

@@ -163,13 +161,13 @@ func NewLightClientUpdateFromBeaconState(
 	updateAttestedPeriod := slots.SyncCommitteePeriod(slots.ToEpoch(attestedBlock.Block().Slot()))

 	// update = LightClientUpdate()
-	result, err := CreateDefaultLightClientUpdate(currentSlot, attestedState)
+	result, err := CreateDefaultLightClientUpdate(attestedBlock)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not create default light client update")
 	}

 	// update.attested_header = block_to_light_client_header(attested_block)
-	attestedLightClientHeader, err := BlockToLightClientHeader(ctx, currentSlot, attestedBlock)
+	attestedLightClientHeader, err := BlockToLightClientHeader(ctx, attestedBlock.Version(), attestedBlock)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get attested light client header")
 	}

@@ -210,7 +208,7 @@ func NewLightClientUpdateFromBeaconState(
 	// if finalized_block.message.slot != GENESIS_SLOT
 	if finalizedBlock.Block().Slot() != 0 {
 		// update.finalized_header = block_to_light_client_header(finalized_block)
-		finalizedLightClientHeader, err := BlockToLightClientHeader(ctx, currentSlot, finalizedBlock)
+		finalizedLightClientHeader, err := BlockToLightClientHeader(ctx, attestedBlock.Version(), finalizedBlock)
 		if err != nil {
 			return nil, errors.Wrap(err, "could not get finalized light client header")
 		}

@@ -247,9 +245,7 @@ func NewLightClientUpdateFromBeaconState(
 	return result, nil
 }

-func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState state.BeaconState) (interfaces.LightClientUpdate, error) {
-	currentEpoch := slots.ToEpoch(currentSlot)
-
+func CreateDefaultLightClientUpdate(attestedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientUpdate, error) {
 	syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
 	pubKeys := make([][]byte, syncCommitteeSize)
 	for i := uint64(0); i < syncCommitteeSize; i++ {

@@ -261,7 +257,7 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
 	}

 	var nextSyncCommitteeBranch [][]byte
-	if attestedState.Version() >= version.Electra {
+	if attestedBlock.Version() >= version.Electra {
 		nextSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
 	} else {
 		nextSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)

@@ -276,7 +272,7 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
 	}

 	var finalityBranch [][]byte
-	if attestedState.Version() >= version.Electra {
+	if attestedBlock.Version() >= version.Electra {
 		finalityBranch = make([][]byte, fieldparams.FinalityBranchDepthElectra)
 	} else {
 		finalityBranch = make([][]byte, fieldparams.FinalityBranchDepth)

@@ -286,10 +282,12 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
 	}

 	var m proto.Message
-	if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
+	switch attestedBlock.Version() {
+	case version.Altair, version.Bellatrix:
 		m = &pb.LightClientUpdateAltair{
 			AttestedHeader: &pb.LightClientHeaderAltair{
 				Beacon: &pb.BeaconBlockHeader{
+					Slot:       attestedBlock.Block().Slot(),
 					ParentRoot: make([]byte, 32),
 					StateRoot:  make([]byte, 32),
 					BodyRoot:   make([]byte, 32),

@@ -310,10 +308,11 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
 				SyncCommitteeSignature: make([]byte, 96),
 			},
 		}
-	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
+	case version.Capella:
 		m = &pb.LightClientUpdateCapella{
 			AttestedHeader: &pb.LightClientHeaderCapella{
 				Beacon: &pb.BeaconBlockHeader{
+					Slot:       attestedBlock.Block().Slot(),
 					ParentRoot: make([]byte, 32),
 					StateRoot:  make([]byte, 32),
 					BodyRoot:   make([]byte, 32),

@@ -362,10 +361,11 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
 				SyncCommitteeSignature: make([]byte, 96),
 			},
 		}
-	} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
+	case version.Deneb:
 		m = &pb.LightClientUpdateDeneb{
 			AttestedHeader: &pb.LightClientHeaderDeneb{
 				Beacon: &pb.BeaconBlockHeader{
+					Slot:       attestedBlock.Block().Slot(),
 					ParentRoot: make([]byte, 32),
 					StateRoot:  make([]byte, 32),
 					BodyRoot:   make([]byte, 32),

@@ -418,120 +418,65 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
 				SyncCommitteeSignature: make([]byte, 96),
 			},
 		}
-	} else {
-		if attestedState.Version() >= version.Electra {
-			m = &pb.LightClientUpdateElectra{
-				AttestedHeader: &pb.LightClientHeaderDeneb{
-					Beacon: &pb.BeaconBlockHeader{
-						ParentRoot: make([]byte, 32),
-						StateRoot:  make([]byte, 32),
-						BodyRoot:   make([]byte, 32),
-					},
-					Execution: &enginev1.ExecutionPayloadHeaderDeneb{
-						ParentHash:       make([]byte, fieldparams.RootLength),
-						FeeRecipient:     make([]byte, fieldparams.FeeRecipientLength),
-						StateRoot:        make([]byte, fieldparams.RootLength),
-						ReceiptsRoot:     make([]byte, fieldparams.RootLength),
-						LogsBloom:        make([]byte, fieldparams.LogsBloomLength),
-						PrevRandao:       make([]byte, fieldparams.RootLength),
-						ExtraData:        make([]byte, 0),
-						BaseFeePerGas:    make([]byte, fieldparams.RootLength),
-						BlockHash:        make([]byte, fieldparams.RootLength),
-						TransactionsRoot: make([]byte, fieldparams.RootLength),
-						WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
-						GasLimit:         0,
-						GasUsed:          0,
-					},
-					ExecutionBranch: executionBranch,
-				},
-				NextSyncCommittee:       nextSyncCommittee,
-				NextSyncCommitteeBranch: nextSyncCommitteeBranch,
-				FinalityBranch:          finalityBranch,
-				FinalizedHeader: &pb.LightClientHeaderDeneb{
-					Beacon: &pb.BeaconBlockHeader{
-						ParentRoot: make([]byte, 32),
-						StateRoot:  make([]byte, 32),
-						BodyRoot:   make([]byte, 32),
-					},
-					Execution: &enginev1.ExecutionPayloadHeaderDeneb{
-						ParentHash:       make([]byte, fieldparams.RootLength),
-						FeeRecipient:     make([]byte, fieldparams.FeeRecipientLength),
-						StateRoot:        make([]byte, fieldparams.RootLength),
-						ReceiptsRoot:     make([]byte, fieldparams.RootLength),
-						LogsBloom:        make([]byte, fieldparams.LogsBloomLength),
-						PrevRandao:       make([]byte, fieldparams.RootLength),
-						ExtraData:        make([]byte, 0),
-						BaseFeePerGas:    make([]byte, fieldparams.RootLength),
-						BlockHash:        make([]byte, fieldparams.RootLength),
-						TransactionsRoot: make([]byte, fieldparams.RootLength),
-						WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
-						GasLimit:         0,
-						GasUsed:          0,
-					},
-					ExecutionBranch: executionBranch,
-				},
-				SyncAggregate: &pb.SyncAggregate{
-					SyncCommitteeBits:      make([]byte, 64),
-					SyncCommitteeSignature: make([]byte, 96),
-				},
-			}
-		} else {
-			m = &pb.LightClientUpdateDeneb{
-				AttestedHeader: &pb.LightClientHeaderDeneb{
-					Beacon: &pb.BeaconBlockHeader{
-						ParentRoot: make([]byte, 32),
-						StateRoot:  make([]byte, 32),
-						BodyRoot:   make([]byte, 32),
-					},
-					Execution: &enginev1.ExecutionPayloadHeaderDeneb{
-						ParentHash:       make([]byte, fieldparams.RootLength),
-						FeeRecipient:     make([]byte, fieldparams.FeeRecipientLength),
-						StateRoot:        make([]byte, fieldparams.RootLength),
-						ReceiptsRoot:     make([]byte, fieldparams.RootLength),
-						LogsBloom:        make([]byte, fieldparams.LogsBloomLength),
-						PrevRandao:       make([]byte, fieldparams.RootLength),
-						ExtraData:        make([]byte, 0),
-						BaseFeePerGas:    make([]byte, fieldparams.RootLength),
-						BlockHash:        make([]byte, fieldparams.RootLength),
-						TransactionsRoot: make([]byte, fieldparams.RootLength),
-						WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
-						GasLimit:         0,
-						GasUsed:          0,
-					},
-					ExecutionBranch: executionBranch,
-				},
-				NextSyncCommittee:       nextSyncCommittee,
-				NextSyncCommitteeBranch: nextSyncCommitteeBranch,
-				FinalityBranch:          finalityBranch,
-				FinalizedHeader: &pb.LightClientHeaderDeneb{
-					Beacon: &pb.BeaconBlockHeader{
-						ParentRoot: make([]byte, 32),
-						StateRoot:  make([]byte, 32),
-						BodyRoot:   make([]byte, 32),
-					},
-					Execution: &enginev1.ExecutionPayloadHeaderDeneb{
-						ParentHash:       make([]byte, fieldparams.RootLength),
-						FeeRecipient:     make([]byte, fieldparams.FeeRecipientLength),
-						StateRoot:        make([]byte, fieldparams.RootLength),
-						ReceiptsRoot:     make([]byte, fieldparams.RootLength),
-						LogsBloom:        make([]byte, fieldparams.LogsBloomLength),
-						PrevRandao:       make([]byte, fieldparams.RootLength),
-						ExtraData:        make([]byte, 0),
-						BaseFeePerGas:    make([]byte, fieldparams.RootLength),
-						BlockHash:        make([]byte, fieldparams.RootLength),
-						TransactionsRoot: make([]byte, fieldparams.RootLength),
-						WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
-						GasLimit:         0,
-						GasUsed:          0,
-					},
-					ExecutionBranch: executionBranch,
-				},
-				SyncAggregate: &pb.SyncAggregate{
-					SyncCommitteeBits:      make([]byte, 64),
-					SyncCommitteeSignature: make([]byte, 96),
-				},
-			}
-		}
-	}
+	case version.Electra, version.Fulu:
+		m = &pb.LightClientUpdateElectra{
+			AttestedHeader: &pb.LightClientHeaderDeneb{
+				Beacon: &pb.BeaconBlockHeader{
+					Slot:       attestedBlock.Block().Slot(),
+					ParentRoot: make([]byte, 32),
+					StateRoot:  make([]byte, 32),
+					BodyRoot:   make([]byte, 32),
+				},
+				Execution: &enginev1.ExecutionPayloadHeaderDeneb{
+					ParentHash:       make([]byte, fieldparams.RootLength),
+					FeeRecipient:     make([]byte, fieldparams.FeeRecipientLength),
+					StateRoot:        make([]byte, fieldparams.RootLength),
+					ReceiptsRoot:     make([]byte, fieldparams.RootLength),
+					LogsBloom:        make([]byte, fieldparams.LogsBloomLength),
+					PrevRandao:       make([]byte, fieldparams.RootLength),
+					ExtraData:        make([]byte, 0),
+					BaseFeePerGas:    make([]byte, fieldparams.RootLength),
+					BlockHash:        make([]byte, fieldparams.RootLength),
+					TransactionsRoot: make([]byte, fieldparams.RootLength),
+					WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
+					GasLimit:         0,
+					GasUsed:          0,
+				},
+				ExecutionBranch: executionBranch,
+			},
+			NextSyncCommittee:       nextSyncCommittee,
+			NextSyncCommitteeBranch: nextSyncCommitteeBranch,
+			FinalityBranch:          finalityBranch,
+			FinalizedHeader: &pb.LightClientHeaderDeneb{
+				Beacon: &pb.BeaconBlockHeader{
+					ParentRoot: make([]byte, 32),
+					StateRoot:  make([]byte, 32),
+					BodyRoot:   make([]byte, 32),
+				},
+				Execution: &enginev1.ExecutionPayloadHeaderDeneb{
+					ParentHash:       make([]byte, fieldparams.RootLength),
+					FeeRecipient:     make([]byte, fieldparams.FeeRecipientLength),
+					StateRoot:        make([]byte, fieldparams.RootLength),
+					ReceiptsRoot:     make([]byte, fieldparams.RootLength),
+					LogsBloom:        make([]byte, fieldparams.LogsBloomLength),
+					PrevRandao:       make([]byte, fieldparams.RootLength),
+					ExtraData:        make([]byte, 0),
+					BaseFeePerGas:    make([]byte, fieldparams.RootLength),
+					BlockHash:        make([]byte, fieldparams.RootLength),
+					TransactionsRoot: make([]byte, fieldparams.RootLength),
+					WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
+					GasLimit:         0,
+					GasUsed:          0,
+				},
+				ExecutionBranch: executionBranch,
+			},
+			SyncAggregate: &pb.SyncAggregate{
+				SyncCommitteeBits:      make([]byte, 64),
+				SyncCommitteeSignature: make([]byte, 96),
+			},
+		}
+	default:
+		return nil, errors.Errorf("unsupported beacon chain version %s", version.String(attestedBlock.Version()))
+	}

 	return light_client.NewWrappedUpdate(m)

@@ -575,189 +520,52 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
 func BlockToLightClientHeader(
 	ctx context.Context,
-	currentSlot primitives.Slot,
-	block interfaces.ReadOnlySignedBeaconBlock,
+	attestedBlockVersion int, // this is the version that the light client header should be in, based on the attested block.
+	block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block.
+	// in case of the latter, we might need to upgrade it to the attested block's version.
 ) (interfaces.LightClientHeader, error) {
-	var m proto.Message
-	currentEpoch := slots.ToEpoch(currentSlot)
-	blockEpoch := slots.ToEpoch(block.Block().Slot())
-	parentRoot := block.Block().ParentRoot()
-	stateRoot := block.Block().StateRoot()
-	bodyRoot, err := block.Block().Body().HashTreeRoot()
-	if err != nil {
-		return nil, errors.Wrap(err, "could not get body root")
+	if block.Version() > attestedBlockVersion {
+		return nil, errors.Errorf("block version %s is greater than attested block version %s", version.String(block.Version()), version.String(attestedBlockVersion))
 	}

-	if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
+	beacon, err := makeBeaconBlockHeader(block)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not make beacon block header")
+	}
+
+	var m proto.Message
+	switch attestedBlockVersion {
+	case version.Altair, version.Bellatrix:
 		m = &pb.LightClientHeaderAltair{
-			Beacon: &pb.BeaconBlockHeader{
-				Slot:          block.Block().Slot(),
-				ProposerIndex: block.Block().ProposerIndex(),
-				ParentRoot:    parentRoot[:],
-				StateRoot:     stateRoot[:],
-				BodyRoot:      bodyRoot[:],
-			},
+			Beacon: beacon,
 		}
-	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
-		var payloadHeader *enginev1.ExecutionPayloadHeaderCapella
-		var payloadProof [][]byte
-
-		if blockEpoch < params.BeaconConfig().CapellaForkEpoch {
-			var ok bool
-
-			p, err := execution.EmptyExecutionPayloadHeader(version.Capella)
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get payload header")
-			}
-			payloadHeader, ok = p.(*enginev1.ExecutionPayloadHeaderCapella)
-			if !ok {
-				return nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderCapella{})
-			}
-			payloadProof = emptyPayloadProof()
-		} else {
-			payload, err := block.Block().Body().Execution()
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get execution payload")
-			}
-			transactionsRoot, err := ComputeTransactionsRoot(payload)
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get transactions root")
-			}
-			withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get withdrawals root")
-			}
-
-			payloadHeader = &enginev1.ExecutionPayloadHeaderCapella{
-				ParentHash:       payload.ParentHash(),
-				FeeRecipient:     payload.FeeRecipient(),
-				StateRoot:        payload.StateRoot(),
-				ReceiptsRoot:     payload.ReceiptsRoot(),
-				LogsBloom:        payload.LogsBloom(),
-				PrevRandao:       payload.PrevRandao(),
-				BlockNumber:      payload.BlockNumber(),
-				GasLimit:         payload.GasLimit(),
-				GasUsed:          payload.GasUsed(),
-				Timestamp:        payload.Timestamp(),
-				ExtraData:        payload.ExtraData(),
-				BaseFeePerGas:    payload.BaseFeePerGas(),
-				BlockHash:        payload.BlockHash(),
-				TransactionsRoot: transactionsRoot,
-				WithdrawalsRoot:  withdrawalsRoot,
-			}
-
-			payloadProof, err = blocks.PayloadProof(ctx, block.Block())
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get execution payload proof")
-			}
-		}
+	case version.Capella:
+		payloadHeader, payloadProof, err := makeExecutionAndProofCapella(ctx, block)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not make execution payload header and proof")
+		}

 		m = &pb.LightClientHeaderCapella{
-			Beacon: &pb.BeaconBlockHeader{
-				Slot:          block.Block().Slot(),
-				ProposerIndex: block.Block().ProposerIndex(),
-				ParentRoot:    parentRoot[:],
-				StateRoot:     stateRoot[:],
-				BodyRoot:      bodyRoot[:],
-			},
+			Beacon:          beacon,
 			Execution:       payloadHeader,
 			ExecutionBranch: payloadProof,
 		}
-	} else {
-		var payloadHeader *enginev1.ExecutionPayloadHeaderDeneb
-		var payloadProof [][]byte
-
-		if blockEpoch < params.BeaconConfig().CapellaForkEpoch {
-			var ok bool
-
-			p, err := execution.EmptyExecutionPayloadHeader(version.Deneb)
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get payload header")
-			}
-			payloadHeader, ok = p.(*enginev1.ExecutionPayloadHeaderDeneb)
-			if !ok {
-				return nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderDeneb{})
-			}
-			payloadProof = emptyPayloadProof()
-		} else {
-			payload, err := block.Block().Body().Execution()
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get execution payload")
-			}
-			transactionsRoot, err := ComputeTransactionsRoot(payload)
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get transactions root")
-			}
-			withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get withdrawals root")
-			}
-
-			var blobGasUsed uint64
-			var excessBlobGas uint64
-
-			if blockEpoch >= params.BeaconConfig().DenebForkEpoch {
-				blobGasUsed, err = payload.BlobGasUsed()
-				if err != nil {
-					return nil, errors.Wrap(err, "could not get blob gas used")
-				}
-				excessBlobGas, err = payload.ExcessBlobGas()
-				if err != nil {
-					return nil, errors.Wrap(err, "could not get excess blob gas")
-				}
-			}
-
-			payloadHeader = &enginev1.ExecutionPayloadHeaderDeneb{
-				ParentHash:       payload.ParentHash(),
-				FeeRecipient:     payload.FeeRecipient(),
-				StateRoot:        payload.StateRoot(),
-				ReceiptsRoot:     payload.ReceiptsRoot(),
-				LogsBloom:        payload.LogsBloom(),
-				PrevRandao:       payload.PrevRandao(),
-				BlockNumber:      payload.BlockNumber(),
-				GasLimit:         payload.GasLimit(),
-				GasUsed:          payload.GasUsed(),
-				Timestamp:        payload.Timestamp(),
-				ExtraData:        payload.ExtraData(),
-				BaseFeePerGas:    payload.BaseFeePerGas(),
-				BlockHash:        payload.BlockHash(),
-				TransactionsRoot: transactionsRoot,
-				WithdrawalsRoot:  withdrawalsRoot,
-				BlobGasUsed:      blobGasUsed,
-				ExcessBlobGas:    excessBlobGas,
-			}
-
-			payloadProof, err = blocks.PayloadProof(ctx, block.Block())
-			if err != nil {
-				return nil, errors.Wrap(err, "could not get execution payload proof")
-			}
-		}
+	case version.Deneb, version.Electra, version.Fulu:
+		payloadHeader, payloadProof, err := makeExecutionAndProofDeneb(ctx, block)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not make execution payload header and proof")
+		}

 		m = &pb.LightClientHeaderDeneb{
-			Beacon: &pb.BeaconBlockHeader{
-				Slot:          block.Block().Slot(),
-				ProposerIndex: block.Block().ProposerIndex(),
-				ParentRoot:    parentRoot[:],
-				StateRoot:     stateRoot[:],
-				BodyRoot:      bodyRoot[:],
-			},
+			Beacon:          beacon,
 			Execution:       payloadHeader,
 			ExecutionBranch: payloadProof,
 		}
+	default:
+		return nil, fmt.Errorf("unsupported attested block version %s", version.String(attestedBlockVersion))
 	}

 	return light_client.NewWrappedHeader(m)
 }

-func emptyPayloadProof() [][]byte {
-	branch := interfaces.LightClientExecutionBranch{}
-	proof := make([][]byte, len(branch))
-	for i, b := range branch {
-		proof[i] = b[:]
-	}
-	return proof
-}
-
 func HasRelevantSyncCommittee(update interfaces.LightClientUpdate) (bool, error) {
 	if update.Version() >= version.Electra {
 		branch, err := update.NextSyncCommitteeBranchElectra()

@@ -909,7 +717,7 @@ func NewLightClientBootstrapFromBeaconState(
 		return nil, errors.Wrap(err, "could not create default light client bootstrap")
 	}

-	lightClientHeader, err := BlockToLightClientHeader(ctx, currentSlot, block)
+	lightClientHeader, err := BlockToLightClientHeader(ctx, state.Version(), block)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not convert block to light client header")
 	}

@@ -942,78 +750,6 @@ func NewLightClientBootstrapFromBeaconState(
 	return bootstrap, nil
 }

-func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.LightClientBootstrap, error) {
-	currentEpoch := slots.ToEpoch(currentSlot)
-	syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
-	pubKeys := make([][]byte, syncCommitteeSize)
-	for i := uint64(0); i < syncCommitteeSize; i++ {
-		pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
-	}
-	currentSyncCommittee := &pb.SyncCommittee{
-		Pubkeys:         pubKeys,
-		AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
-	}
-
-	var currentSyncCommitteeBranch [][]byte
-	if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
-		currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
-	} else {
-		currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)
-	}
-	for i := 0; i < len(currentSyncCommitteeBranch); i++ {
-		currentSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
-	}
-
-	executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
-	for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
-		executionBranch[i] = make([]byte, 32)
-	}
-
-	// TODO: can this be based on the current epoch?
-	var m proto.Message
-	if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
-		m = &pb.LightClientBootstrapAltair{
-			Header: &pb.LightClientHeaderAltair{
-				Beacon: &pb.BeaconBlockHeader{},
-			},
-			CurrentSyncCommittee:       currentSyncCommittee,
-			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
-		}
-	} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
-		m = &pb.LightClientBootstrapCapella{
-			Header: &pb.LightClientHeaderCapella{
-				Beacon:          &pb.BeaconBlockHeader{},
-				Execution:       &enginev1.ExecutionPayloadHeaderCapella{},
-				ExecutionBranch: executionBranch,
-			},
-			CurrentSyncCommittee:       currentSyncCommittee,
-			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
-		}
-	} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
-		m = &pb.LightClientBootstrapDeneb{
-			Header: &pb.LightClientHeaderDeneb{
-				Beacon:          &pb.BeaconBlockHeader{},
-				Execution:       &enginev1.ExecutionPayloadHeaderDeneb{},
-				ExecutionBranch: executionBranch,
-			},
-			CurrentSyncCommittee:       currentSyncCommittee,
-			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
-		}
-	} else {
-		m = &pb.LightClientBootstrapElectra{
-			Header: &pb.LightClientHeaderDeneb{
-				Beacon:          &pb.BeaconBlockHeader{},
-				Execution:       &enginev1.ExecutionPayloadHeaderDeneb{},
-				ExecutionBranch: executionBranch,
-			},
-			CurrentSyncCommittee:       currentSyncCommittee,
-			CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
-		}
-	}
-
-	return light_client.NewWrappedBootstrap(m)
-}
-
 func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
 	maxActiveParticipants := syncAggregate.SyncCommitteeBits.Len()
 	numActiveParticipants := syncAggregate.SyncCommitteeBits.Count()
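For reference, the public signatures after this diff (the doc comments here are added for orientation and are not in the source):

// CreateDefaultLightClientUpdate returns a zero-valued update whose concrete
// message type is chosen by the attested block's fork version.
func CreateDefaultLightClientUpdate(attestedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientUpdate, error)

// BlockToLightClientHeader converts block (the attested or the finalized
// block) into a header of the flavor dictated by attestedBlockVersion,
// upgrading an older finalized block when necessary.
func BlockToLightClientHeader(
	ctx context.Context,
	attestedBlockVersion int,
	block interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientHeader, error)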
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
@@ -547,7 +546,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().AltairForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Altair,
|
||||
l.Block,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
@@ -570,7 +569,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().BellatrixForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Bellatrix,
|
||||
l.Block,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
@@ -594,7 +593,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().CapellaForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Capella,
|
||||
l.Block,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
@@ -655,7 +654,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().CapellaForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Capella,
|
||||
l.Block,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
@@ -718,7 +717,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Deneb,
|
||||
l.Block,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
@@ -787,7 +786,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Deneb,
|
||||
l.Block,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
@@ -856,7 +855,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
t.Run("Non-Blinded Beacon Block", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Electra)
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(l.Ctx, l.State.Slot(), l.Block)
|
||||
header, err := lightClient.BlockToLightClientHeader(l.Ctx, version.Electra, l.Block)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, header, "header is nil")
|
||||
|
||||
@@ -921,7 +920,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
t.Run("Blinded Beacon Block", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, version.Electra, util.WithBlinded())
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(l.Ctx, l.State.Slot(), l.Block)
|
||||
header, err := lightClient.BlockToLightClientHeader(l.Ctx, version.Electra, l.Block)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, header, "header is nil")
|
||||
|
||||
@@ -989,7 +988,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().CapellaForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Capella,
|
||||
l.Block)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, header, "header is nil")
|
||||
@@ -1011,7 +1010,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Deneb,
|
||||
l.Block)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, header, "header is nil")
|
||||
@@ -1034,7 +1033,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Deneb,
|
||||
l.Block)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, header, "header is nil")
|
||||
@@ -1094,7 +1093,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
header, err := lightClient.BlockToLightClientHeader(
|
||||
l.Ctx,
|
||||
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
|
||||
version.Deneb,
|
||||
l.Block)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, header, "header is nil")
|
||||
@@ -1180,14 +1179,13 @@ func createNonEmptyFinalityBranch() [][]byte {
|
||||
}
|
||||
|
||||
func TestIsBetterUpdate(t *testing.T) {
|
||||
config := params.BeaconConfig()
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockAltair())
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("new has supermajority but old doesn't", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
@@ -1203,9 +1201,9 @@ func TestIsBetterUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("old has supermajority but new doesn't", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1221,9 +1219,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new doesn't have supermajority and newNumActiveParticipants is greater than oldNumActiveParticipants", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1239,9 +1237,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new doesn't have supermajority and newNumActiveParticipants is lesser than oldNumActiveParticipants", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1257,9 +1255,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new has relevant sync committee but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1296,9 +1294,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("old has relevant sync committee but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1335,9 +1333,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new has finality but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1378,9 +1376,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("old has finality but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1421,9 +1419,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new has finality and sync committee finality both but old doesn't have sync committee finality", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1482,9 +1480,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new has finality but doesn't have sync committee finality and old has sync committee finality", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1543,9 +1541,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new has more active participants than old", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1561,9 +1559,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new has less active participants than old", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1579,9 +1577,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new's attested header's slot is lesser than old's attested header's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1640,9 +1638,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("new's attested header's slot is greater than old's attested header's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1701,9 +1699,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("none of the above conditions are met and new signature's slot is less than old signature's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1762,9 +1760,9 @@ func TestIsBetterUpdate(t *testing.T) {
})

t.Run("none of the above conditions are met and new signature's slot is greater than old signature's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{

@@ -13,6 +13,10 @@ type Store struct {
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
}

func NewLightClientStore() *Store {
return &Store{}
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
s.mu.Lock()
defer s.mu.Unlock()

@@ -153,6 +153,13 @@ func decodeLightClientBootstrap(enc []byte) (interfaces.LightClientBootstrap, []
}
m = bootstrap
syncCommitteeHash = enc[len(altairKey) : len(altairKey)+32]
case hasBellatrixKey(enc):
bootstrap := &ethpb.LightClientBootstrapAltair{}
if err := bootstrap.UnmarshalSSZ(enc[len(bellatrixKey)+32:]); err != nil {
return nil, nil, errors.Wrap(err, "could not unmarshal Bellatrix light client bootstrap")
}
m = bootstrap
syncCommitteeHash = enc[len(bellatrixKey) : len(bellatrixKey)+32]
case hasCapellaKey(enc):
bootstrap := &ethpb.LightClientBootstrapCapella{}
if err := bootstrap.UnmarshalSSZ(enc[len(capellaKey)+32:]); err != nil {
@@ -265,6 +272,12 @@ func decodeLightClientUpdate(enc []byte) (interfaces.LightClientUpdate, error) {
return nil, errors.Wrap(err, "could not unmarshal Altair light client update")
}
m = update
case hasBellatrixKey(enc):
update := &ethpb.LightClientUpdateAltair{}
if err := update.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Bellatrix light client update")
}
m = update
case hasCapellaKey(enc):
update := &ethpb.LightClientUpdateCapella{}
if err := update.UnmarshalSSZ(enc[len(capellaKey):]); err != nil {
@@ -297,6 +310,8 @@ func keyForLightClientUpdate(v int) ([]byte, error) {
return denebKey, nil
case version.Capella:
return capellaKey, nil
case version.Bellatrix:
return bellatrixKey, nil
case version.Altair:
return altairKey, nil
default:

@@ -46,7 +46,21 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
BodyRoot: sampleRoot,
},
})
require.NoError(t, err)
st, err = util.NewBeaconState()
require.NoError(t, err)
case version.Bellatrix:
slot = primitives.Slot(config.BellatrixForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -60,7 +74,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.CapellaForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -88,7 +102,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.DenebForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -116,7 +130,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.ElectraForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -144,7 +158,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.FuluForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -192,71 +206,30 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 3
cfg.BellatrixForkEpoch = 1
cfg.CapellaForkEpoch = 2
cfg.DenebForkEpoch = 3
cfg.ElectraForkEpoch = 4
cfg.FuluForkEpoch = 5
params.OverrideBeaconConfig(cfg)

db := setupDB(t)
ctx := t.Context()

t.Run("Altair", func(t *testing.T) {
update, err := createUpdate(t, version.Altair)
require.NoError(t, err)
period := uint64(1)
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
update, err := createUpdate(t, testVersion)
require.NoError(t, err)
period := uint64(1)

err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)

retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Capella", func(t *testing.T) {
update, err := createUpdate(t, version.Capella)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)

retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Deneb", func(t *testing.T) {
update, err := createUpdate(t, version.Deneb)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)

retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Electra", func(t *testing.T) {
update, err := createUpdate(t, version.Electra)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)

retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Fulu", func(t *testing.T) {
update, err := createUpdate(t, version.Fulu)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)

retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
}
}
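The refactor above collapses five copy-pasted per-fork subtests into one table-driven loop. Stripped of diff noise, the resulting shape is roughly the sketch below; the helpers (createUpdate, setupDB) are the ones defined in this test file, and the hard-coded period of 1 is illustrative:

// Table-driven sketch of the refactor above: one loop over fork versions,
// with the subtest name derived from the version so every fork runs the
// same save/retrieve assertions.
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
	t.Run(version.String(testVersion), func(t *testing.T) {
		update, err := createUpdate(t, testVersion)
		require.NoError(t, err)

		// Save under a fixed sync committee period, then read it back.
		require.NoError(t, db.SaveLightClientUpdate(ctx, 1, update))

		got, err := db.LightClientUpdate(ctx, 1)
		require.NoError(t, err)
		require.DeepEqual(t, update, got, "retrieved update does not match saved update")
	})
}

Deriving the subtest name from version.String keeps per-fork filtering with go test -run while guaranteeing identical coverage across forks.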

func TestStore_LightClientUpdates_canRetrieveRange(t *testing.T) {
@@ -584,12 +557,21 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.BellatrixForkEpoch = 1
cfg.CapellaForkEpoch = 2
cfg.DenebForkEpoch = 3
cfg.ElectraForkEpoch = 4
cfg.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(cfg)

versionToForkEpoch := map[int]primitives.Epoch{
version.Altair: params.BeaconConfig().AltairForkEpoch,
version.Bellatrix: params.BeaconConfig().BellatrixForkEpoch,
version.Capella: params.BeaconConfig().CapellaForkEpoch,
version.Deneb: params.BeaconConfig().DenebForkEpoch,
version.Electra: params.BeaconConfig().ElectraForkEpoch,
}

db := setupDB(t)
ctx := t.Context()

@@ -599,89 +581,38 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
require.IsNil(t, retrievedBootstrap)
})

t.Run("Altair", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(versionToForkEpoch[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)

err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)

err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair"), bootstrap)
require.NoError(t, err)
blockRoot := []byte("blockRootAltair" + version.String(testVersion))

retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
err = db.SaveLightClientBootstrap(ctx, blockRoot, bootstrap)
require.NoError(t, err)

t.Run("Capella", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().CapellaForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)

err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)

err = db.SaveLightClientBootstrap(ctx, []byte("blockRootCapella"), bootstrap)
require.NoError(t, err)

retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootCapella"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})

t.Run("Deneb", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().DenebForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)

err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)

err = db.SaveLightClientBootstrap(ctx, []byte("blockRootDeneb"), bootstrap)
require.NoError(t, err)

retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootDeneb"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})

t.Run("Electra", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().ElectraForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)

err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)

err = db.SaveLightClientBootstrap(ctx, []byte("blockRootElectra"), bootstrap)
require.NoError(t, err)

retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootElectra"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
retrievedBootstrap, err := db.LightClientBootstrap(ctx, blockRoot)
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
if testVersion >= version.Electra {
savedBranch, err := bootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
} else {
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
}
})
}
}

func TestStore_LightClientBootstrap_MultipleBootstrapsWithSameSyncCommittee(t *testing.T) {
@@ -839,6 +770,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapAltair{
Header: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -851,6 +783,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapCapella{
Header: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -877,6 +810,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapDeneb{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -905,6 +839,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapElectra{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),

@@ -31,6 +31,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/execution/types:go_default_library",
@@ -97,6 +98,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",

@@ -7,6 +7,7 @@ import (
"strings"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -44,11 +45,18 @@ var (
GetPayloadMethodV3,
GetPayloadBodiesByHashV1,
GetPayloadBodiesByRangeV1,
GetBlobsV1,
}

electraEngineEndpoints = []string{
NewPayloadMethodV4,
GetPayloadMethodV4,
}

fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
}
)

const (
@@ -73,6 +81,8 @@ const (
GetPayloadMethodV3 = "engine_getPayloadV3"
// GetPayloadMethodV4 is the get payload method added for electra
GetPayloadMethodV4 = "engine_getPayloadV4"
// GetPayloadMethodV5 is the get payload method added for fulu
GetPayloadMethodV5 = "engine_getPayloadV5"
// BlockByHashMethod request string for JSON-RPC.
BlockByHashMethod = "eth_getBlockByHash"
// BlockByNumberMethod request string for JSON-RPC.
@@ -85,11 +95,16 @@ const (
ExchangeCapabilities = "engine_exchangeCapabilities"
// GetBlobsV1 request string for JSON-RPC.
GetBlobsV1 = "engine_getBlobsV1"
// GetBlobsV2 request string for JSON-RPC.
GetBlobsV2 = "engine_getBlobsV2"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)

var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
var (
errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
)

// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
@@ -107,7 +122,8 @@ type Reconstructor interface {
ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
}

// EngineCaller defines a client that can interact with an Ethereum
@@ -256,14 +272,17 @@ func (s *Service) ForkchoiceUpdated(
}

func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
pe := slots.ToEpoch(slot)
if pe >= params.BeaconConfig().ElectraForkEpoch {
epoch := slots.ToEpoch(slot)
if epoch >= params.BeaconConfig().FuluForkEpoch {
return GetPayloadMethodV5, &pb.ExecutionBundleFulu{}
}
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
}
if pe >= params.BeaconConfig().DenebForkEpoch {
if epoch >= params.BeaconConfig().DenebForkEpoch {
return GetPayloadMethodV3, &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
}
if pe >= params.BeaconConfig().CapellaForkEpoch {
if epoch >= params.BeaconConfig().CapellaForkEpoch {
return GetPayloadMethodV2, &pb.ExecutionPayloadCapellaWithValue{}
}
return GetPayloadMethod, &pb.ExecutionPayload{}
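getPayloadMethodAndMessage now walks fork epochs from newest to oldest, so the first matching check wins; testing Fulu before Electra matters because any slot past the Fulu epoch also satisfies the Electra comparison. A condensed, self-contained sketch of that dispatch, with plain uint64 epochs standing in for primitives.Epoch and an illustrative helper name:

// payloadMethodForEpoch mirrors the newest-fork-first dispatch above.
// Fork epochs are monotonically non-decreasing, so ordering the checks
// from newest to oldest is what makes the selection unambiguous.
func payloadMethodForEpoch(epoch, fulu, electra, deneb, capella uint64) string {
	switch {
	case epoch >= fulu:
		return "engine_getPayloadV5"
	case epoch >= electra:
		return "engine_getPayloadV4"
	case epoch >= deneb:
		return "engine_getPayloadV3"
	case epoch >= capella:
		return "engine_getPayloadV2"
	default:
		return "engine_getPayloadV1"
	}
}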
@@ -289,7 +308,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
}
res, err := blocks.NewGetPayloadResponse(result)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "new get payload response")
}
return res, nil
}
@@ -298,33 +317,36 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
defer span.End()

// Only check for electra related engine methods if it has been activated.
if params.ElectraEnabled() {
supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
}
var result []string
err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
if err != nil {

if params.FuluEnabled() {
supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
}

elSupportedEndpointsSlice := make([]string, len(supportedEngineEndpoints))
if err := s.rpcClient.CallContext(ctx, &elSupportedEndpointsSlice, ExchangeCapabilities, supportedEngineEndpoints); err != nil {
return nil, handleRPCError(err)
}

var unsupported []string
for _, s1 := range supportedEngineEndpoints {
supported := false
for _, s2 := range result {
if s1 == s2 {
supported = true
break
}
}
if !supported {
unsupported = append(unsupported, s1)
elSupportedEndpoints := make(map[string]bool, len(elSupportedEndpointsSlice))
for _, method := range elSupportedEndpointsSlice {
elSupportedEndpoints[method] = true
}

unsupported := make([]string, 0)
for _, method := range supportedEngineEndpoints {
if !elSupportedEndpoints[method] {
unsupported = append(unsupported, method)
}
}

if len(unsupported) != 0 {
log.Warnf("Please update client, detected the following unsupported engine methods: %s", unsupported)
log.WithField("methods", unsupported).Warning("Connected execution client does not support some requested engine methods")
}
return result, handleRPCError(err)

return elSupportedEndpointsSlice, nil
}
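The rewrite above replaces an O(n*m) nested scan with a set lookup. The detection step in isolation, as a stdlib-only sketch (function name hypothetical):

// unsupportedMethods returns every locally supported method that the
// execution client did not echo back from engine_exchangeCapabilities.
// O(n+m) with a set, versus the O(n*m) nested loops it replaces.
func unsupportedMethods(local, remote []string) []string {
	remoteSet := make(map[string]bool, len(remote))
	for _, m := range remote {
		remoteSet[m] = true
	}
	missing := make([]string, 0, len(local))
	for _, m := range local {
		if !remoteSet[m] {
			missing = append(missing, m)
		}
	}
	return missing
}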

// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
@@ -495,9 +517,10 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
defer span.End()

// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
if !s.capabilityCache.has(GetBlobsV1) {
return nil, nil
return nil, errors.Errorf("%s is not supported", GetBlobsV1)
}

result := make([]*pb.BlobAndProof, len(versionedHashes))
@@ -505,6 +528,19 @@ func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) (
return result, handleRPCError(err)
}

func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()

if !s.capabilityCache.has(GetBlobsV2) {
return nil, errors.Errorf("%s is not supported", GetBlobsV2)
}

result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
return result, handleRPCError(err)
}
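Both GetBlobs variants now fail fast when the corresponding capability was never advertised, instead of silently returning nil. A minimal sketch of that guard pattern, with a plain map standing in for the service's capability cache (names illustrative, not the actual Prysm API; fmt assumed imported):

// callIfSupported gates an engine call on the negotiated capability set,
// surfacing a hard error for methods the EL never advertised.
func callIfSupported(capabilities map[string]bool, method string, call func() error) error {
	if !capabilities[method] {
		return fmt.Errorf("%s is not supported", method)
	}
	return call()
}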

// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -615,6 +651,75 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}

// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
block := signedROBlock.Block()

log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"slot": block.Slot(),
})

kzgCommitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
}

// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(commitment)
versionedHashes = append(versionedHashes, versionedHash)
}

// Fetch all blobs and cell proofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
}

// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
log.Debug("No blobs returned from EL")
return nil, nil
}

// Extract the blobs and proofs from the blobAndProofV2s.
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
for _, blobsAndProofs := range blobAndProofV2s {
if blobsAndProofs == nil {
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
}

blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
}

// Construct the data column sidecars from the blobs and cell proofs provided by the execution client.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
}

// Finally, construct verified RO data column sidecars.
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}

log.Debug("Data columns successfully reconstructed from the execution client.")

return verifiedRODataColumns, nil
}
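A detail worth noting in the extraction loop above: each BlobAndProofV2 entry carries one blob plus one KZG cell proof per column, and the appends flatten the proofs into a single slice in blob order. A stdlib-only sketch of that invariant, using a hypothetical local type and an explicit length check that the production loop leaves to ConstructDataColumnSidecars:

// blobAndProofs is an illustrative stand-in for pb.BlobAndProofV2.
type blobAndProofs struct {
	blob      []byte
	kzgProofs [][]byte // one cell proof per column
}

// flatten mirrors the loop above: after it returns,
// len(cellProofs) == len(entries) * numberOfColumns, and the proofs for
// blob i occupy cellProofs[i*numberOfColumns : (i+1)*numberOfColumns].
func flatten(entries []blobAndProofs, numberOfColumns int) (blobs, cellProofs [][]byte, err error) {
	for i, e := range entries {
		if len(e.kzgProofs) != numberOfColumns {
			return nil, nil, fmt.Errorf("entry %d: got %d cell proofs, want %d", i, len(e.kzgProofs), numberOfColumns)
		}
		blobs = append(blobs, e.blob)
		cellProofs = append(cellProofs, e.kzgProofs...)
	}
	return blobs, cellProofs, nil
}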

func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
) (interfaces.ExecutionData, error) {
@@ -902,3 +1007,8 @@ func toBlockNumArg(number *big.Int) string {
}
return hexutil.EncodeBig(number)
}

// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
}

@@ -13,6 +13,7 @@ import (
"strings"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -167,6 +168,7 @@ func TestClient_HTTP(t *testing.T) {
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)

t.Run(GetPayloadMethod, func(t *testing.T) {
@@ -317,11 +319,11 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, uint64(2), g)

commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
})
t.Run(GetPayloadMethodV4, func(t *testing.T) {
payloadId := [8]byte{1}
@@ -372,11 +374,11 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, uint64(2), g)

commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
@@ -405,7 +407,52 @@ func TestClient_HTTP(t *testing.T) {

require.DeepEqual(t, requests, resp.ExecutionRequests)
})
t.Run(GetPayloadMethodV5, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionBundleFulu"].(*pb.GetPayloadV5ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Service{}
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, 4*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
_, ok = resp.BlobsBundler.(*pb.BlobsBundleV2)
if !ok {
t.Logf("resp.BlobsBundler has unexpected type: %T", resp.BlobsBundler)
}
require.Equal(t, true, ok)
})
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
@@ -1539,6 +1586,7 @@ func fixtures() map[string]interface{} {
"ExecutionPayloadCapellaWithValue": s.ExecutionPayloadWithValueCapella,
"ExecutionPayloadDenebWithValue": s.ExecutionPayloadWithValueDeneb,
"ExecutionBundleElectra": s.ExecutionBundleElectra,
"ExecutionBundleFulu": s.ExecutionBundleFulu,
"ValidPayloadStatus": s.ValidPayloadStatus,
"InvalidBlockHashStatus": s.InvalidBlockHashStatus,
"AcceptedStatus": s.AcceptedStatus,
@@ -1774,6 +1822,36 @@ func fixturesStruct() *payloadFixtures {
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
}
executionBundleFixtureFulu := &pb.GetPayloadV5ResponseJson{
ShouldOverrideBuilder: true,
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
BlobGasUsed: &bgu,
ExcessBlobGas: &ebg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleV2JSON{
Commitments: []hexutil.Bytes{[]byte("commitment1"), []byte("commitment2")},
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
@@ -1868,6 +1946,7 @@ func fixturesStruct() *payloadFixtures {
ExecutionPayloadWithValueCapella: executionPayloadWithValueFixtureCapella,
ExecutionPayloadWithValueDeneb: executionPayloadWithValueFixtureDeneb,
ExecutionBundleElectra: executionBundleFixtureElectra,
ExecutionBundleFulu: executionBundleFixtureFulu,
ValidPayloadStatus: validStatus,
InvalidBlockHashStatus: inValidBlockHashStatus,
AcceptedStatus: acceptedStatus,
@@ -1892,6 +1971,7 @@ type payloadFixtures struct {
ExecutionPayloadWithValueCapella *pb.GetPayloadV2ResponseJson
ExecutionPayloadWithValueDeneb *pb.GetPayloadV3ResponseJson
ExecutionBundleElectra *pb.GetPayloadV4ResponseJson
ExecutionBundleFulu *pb.GetPayloadV5ResponseJson
ValidPayloadStatus *pb.PayloadStatus
InvalidBlockHashStatus *pb.PayloadStatus
AcceptedStatus *pb.PayloadStatus
@@ -2361,7 +2441,7 @@ func Test_ExchangeCapabilities(t *testing.T) {
for _, item := range results {
require.NotNil(t, item)
}
assert.LogsContain(t, logHook, "Please update client, detected the following unsupported engine methods:")
assert.LogsContain(t, logHook, "Connected execution client does not support some requested engine methods")
})
t.Run("list of items", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -2424,7 +2504,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
t.Run("get-blobs end point is not supported", func(t *testing.T) {
hi := mockSummary(t, []bool{true, true, true, true, true, false})
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
require.NoError(t, err)
require.ErrorContains(t, "engine_getBlobsV1 is not supported", err)
require.Equal(t, 0, len(verifiedBlobs))
})

@@ -2476,6 +2556,76 @@ func TestReconstructBlobSidecars(t *testing.T) {
})
}

func TestReconstructDataColumnSidecars(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)

// Set up the right fork epochs.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)

client := &Service{capabilityCache: &capabilityCache{}}
b := util.NewBeaconBlockFulu()
b.Block.Slot = 4 * params.BeaconConfig().SlotsPerEpoch
kzgCommitments := createRandomKzgCommitments(t, 6)
b.Block.Body.BlobKzgCommitments = kzgCommitments
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

ctx := context.Background()

t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "get blobs V2 for block", err)
})

t.Run("nothing received", func(t *testing.T) {
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()

rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})

t.Run("receiving all blobs", func(t *testing.T) {
blobMasks := []bool{true, true, true, true, true, true}
srv := createBlobServerV2(t, 6, blobMasks)
defer srv.Close()

rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})

t.Run("missing some blobs", func(t *testing.T) {
blobMasks := []bool{false, true, true, true, true, true}
srv := createBlobServerV2(t, 6, blobMasks)
defer srv.Close()

rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()

dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
require.Equal(t, 0, len(dataColumns))
})
}

func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
kzgCommitments := make([][]byte, num)
for i := range kzgCommitments {
@@ -2511,6 +2661,42 @@ func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *http
}))
}

func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()

require.Equal(t, len(blobMasks), numBlobs)

blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
for i := range blobAndCellProofs {
if !blobMasks[i] {
continue
}

blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
Blob: []byte("0xblob"),
KzgProofs: []hexutil.Bytes{},
}
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
cellProof := make([]byte, 48)
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
}
}

respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": blobAndCellProofs,
}

err := json.NewEncoder(w).Encode(respJSON)
require.NoError(t, err)
}))
}

func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
rpcClient, err := rpc.DialHTTP(url)
require.NoError(t, err)
@@ -2522,6 +2708,12 @@ func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Se
return rpcClient, client
}

func setupRpcClientV2(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
rpcClient, client := setupRpcClient(t, url, client)
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV2: nil}}
return rpcClient, client
}

func testNewBlobVerifier() verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return &verification.MockBlobVerifier{

@@ -17,6 +17,7 @@ go_library(
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

@@ -4,6 +4,7 @@ import (
"context"
"math/big"

fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -38,6 +39,8 @@ type EngineClient struct {
ErrGetPayload error
BlobSidecars []blocks.VerifiedROBlob
ErrorBlobSidecars error
DataColumnSidecars []blocks.VerifiedRODataColumn
ErrorDataColumnSidecars error
}

// NewPayload --
@@ -109,10 +112,14 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
}

// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
return e.BlobSidecars, e.ErrorBlobSidecars
}

func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}

// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
ttd := new(big.Int)

@@ -167,7 +167,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
syncChecker: &initialsync.SyncChecker{},
custodyInfo: &peerdas.CustodyInfo{},
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
lcStore: &lightclient.Store{},
}

for _, opt := range opts {
@@ -235,6 +234,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
// their initialization.
beacon.finalizedStateAtStartUp = nil

if features.Get().EnableLightClient {
beacon.lcStore = lightclient.NewLightClientStore()
}

return beacon, nil
}

@@ -883,8 +886,10 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithCustodyInfo(b.custodyInfo),
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
)
return b.services.RegisterService(rs)
}

@@ -705,31 +705,46 @@ func (p *Status) deprecatedPrune() {
p.tallyIPTracker()
}

// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
// upon by the majority of peers. This method may not return the absolute highest finalized, but
// the finalized epoch in which most peers can serve blocks (plurality voting).
// Ideally, all peers would be reporting the same finalized epoch but some may be behind due to their
// own latency, or because of their finalized epoch at the time we queried them.
// Returns epoch number and list of peers that are at or beyond that epoch.
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
// finalized epoch but some may be behind due to their own latency, or because of their finalized
// epoch at the time we queried them.
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
// Retrieve all connected peers.
connected := p.Connected()

// key: finalized epoch, value: number of peers that support this finalized epoch.
finalizedEpochVotes := make(map[primitives.Epoch]uint64)

// key: peer ID, value: finalized epoch of the peer.
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))

// key: peer ID, value: head slot of the peer.
pidHead := make(map[peer.ID]primitives.Slot, len(connected))

potentialPIDs := make([]peer.ID, 0, len(connected))
for _, pid := range connected {
peerChainState, err := p.ChainState(pid)
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch >= ourFinalizedEpoch {
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
pidEpoch[pid] = peerChainState.FinalizedEpoch
potentialPIDs = append(potentialPIDs, pid)
pidHead[pid] = peerChainState.HeadSlot

// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
// lower than ours.
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
continue
}

finalizedEpochVotes[peerChainState.FinalizedEpoch]++

pidEpoch[pid] = peerChainState.FinalizedEpoch
pidHead[pid] = peerChainState.HeadSlot

potentialPIDs = append(potentialPIDs, pid)
}

// Select the target epoch, which is the epoch most peers agree upon.
var targetEpoch primitives.Epoch
var mostVotes uint64
// If there is a tie, select the highest epoch.
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
for epoch, count := range finalizedEpochVotes {
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
mostVotes = count
@@ -737,11 +752,12 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
}
}

// Sort PIDs by finalized epoch, in decreasing order.
// Sort PIDs by finalized (epoch, head), in decreasing order.
sort.Slice(potentialPIDs, func(i, j int) bool {
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
}

return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
})
|
||||
|
||||
@@ -765,25 +781,36 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
// and is shared by at least minPeers.
|
||||
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
connected := p.Connected()
|
||||
epochVotes := make(map[primitives.Epoch]uint64)
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
|
||||
ourHeadSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
// key: head epoch, value: number of peers that support this epoch.
|
||||
epochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: head epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.HeadSlot > ourHeadSlot {
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
// Skip if the peer's head epoch is not defined, or if the peer's head slot is
|
||||
// lower or equal than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.HeadSlot <= ourHeadSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which has enough peers' votes (>= minPeers).
|
||||
var targetEpoch primitives.Epoch
|
||||
targetEpoch := primitives.Epoch(0)
|
||||
for epoch, votes := range epochVotes {
|
||||
if votes >= uint64(minPeers) && targetEpoch < epoch {
|
||||
targetEpoch = epoch
|
||||
@@ -1019,7 +1046,10 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
|
||||
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > CollocationLimit {
|
||||
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
|
||||
return errors.Errorf(
|
||||
"colocation limit exceeded: got %d - limit %d for peer %v with IP %v",
|
||||
val, CollocationLimit, pid, ip.String(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
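The selection loop rewritten above implements plurality voting with a highest-epoch tiebreak. A self-contained sketch of just that rule (plain uint64 values stand in for primitives.Epoch):

package main

import "fmt"

// pickTarget returns the epoch with the most votes, preferring the
// higher epoch on a tie - the same selection rule as in BestFinalized.
func pickTarget(votes map[uint64]uint64) uint64 {
	targetEpoch, mostVotes := uint64(0), uint64(0)
	for epoch, count := range votes {
		if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
			mostVotes = count
			targetEpoch = epoch
		}
	}
	return targetEpoch
}

func main() {
	// Three peers at epoch 10, three at epoch 11: the tie resolves to 11.
	fmt.Println(pickTarget(map[uint64]uint64{9: 1, 10: 3, 11: 3})) // 11
}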
@@ -22,50 +22,52 @@ const (
	SchemaVersionV3 = "/3"
)

// Specifies the protocol prefix for all our Req/Resp topics.
const protocolPrefix = "/eth2/beacon_chain/req"
const (
	// Specifies the protocol prefix for all our Req/Resp topics.
	protocolPrefix = "/eth2/beacon_chain/req"

// StatusMessageName specifies the name for the status message topic.
const StatusMessageName = "/status"
	// StatusMessageName specifies the name for the status message topic.
	StatusMessageName = "/status"

// GoodbyeMessageName specifies the name for the goodbye message topic.
const GoodbyeMessageName = "/goodbye"
	// GoodbyeMessageName specifies the name for the goodbye message topic.
	GoodbyeMessageName = "/goodbye"

// BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
const BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"
	// BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
	BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"

// BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
const BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"
	// BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
	BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"

// PingMessageName specifies the name for the ping message topic.
const PingMessageName = "/ping"
	// PingMessageName specifies the name for the ping message topic.
	PingMessageName = "/ping"

// MetadataMessageName specifies the name for the metadata message topic.
const MetadataMessageName = "/metadata"
	// MetadataMessageName specifies the name for the metadata message topic.
	MetadataMessageName = "/metadata"

// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
	// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
	BlobSidecarsByRangeName = "/blob_sidecars_by_range"

// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
	// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
	BlobSidecarsByRootName = "/blob_sidecars_by_root"

// LightClientBootstrapName is the name for the LightClientBootstrap message topic.
const LightClientBootstrapName = "/light_client_bootstrap"
	// LightClientBootstrapName is the name for the LightClientBootstrap message topic.
	LightClientBootstrapName = "/light_client_bootstrap"

// LightClientUpdatesByRangeName is the name for the LightClientUpdatesByRange topic.
const LightClientUpdatesByRangeName = "/light_client_updates_by_range"
	// LightClientUpdatesByRangeName is the name for the LightClientUpdatesByRange topic.
	LightClientUpdatesByRangeName = "/light_client_updates_by_range"

// LightClientFinalityUpdateName is the name for the LightClientFinalityUpdate topic.
const LightClientFinalityUpdateName = "/light_client_finality_update"
	// LightClientFinalityUpdateName is the name for the LightClientFinalityUpdate topic.
	LightClientFinalityUpdateName = "/light_client_finality_update"

// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
const LightClientOptimisticUpdateName = "/light_client_optimistic_update"
	// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
	LightClientOptimisticUpdateName = "/light_client_optimistic_update"

// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
	// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
	DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"

// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
	// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
	DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
)

const (
	// V1 RPC Topics
@@ -101,6 +103,9 @@ const (
	// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root.
	// /eth2/beacon_chain/req/data_column_sidecars_by_root/1 - New in Fulu.
	RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
	// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
	// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
	RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1

	// V2 RPC Topics
	// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
@@ -121,91 +126,103 @@ const (
)

// RPCTopicMappings maps the base message type to the rpc request.
var RPCTopicMappings = map[string]interface{}{
	// RPC Status Message
	RPCStatusTopicV1: new(pb.Status),
	// RPC Goodbye Message
	RPCGoodByeTopicV1: new(primitives.SSZUint64),
	// RPC Block By Range Message
	RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
	RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),
	// RPC Block By Root Message
	RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
	RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),
	// RPC Ping Message
	RPCPingTopicV1: new(primitives.SSZUint64),
	// RPC Metadata Message
	RPCMetaDataTopicV1: new(interface{}),
	RPCMetaDataTopicV2: new(interface{}),
	RPCMetaDataTopicV3: new(interface{}),
	// BlobSidecarsByRange v1 Message
	RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
	// BlobSidecarsByRoot v1 Message
	RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
var (
	RPCTopicMappings = map[string]interface{}{
		// RPC Status Message
		RPCStatusTopicV1: new(pb.Status),

		// Light client
		RPCLightClientBootstrapTopicV1:        new([fieldparams.RootLength]byte),
		RPCLightClientUpdatesByRangeTopicV1:   new(pb.LightClientUpdatesByRangeRequest),
		RPCLightClientFinalityUpdateTopicV1:   new(interface{}),
		RPCLightClientOptimisticUpdateTopicV1: new(interface{}),
		// RPC Goodbye Message
		RPCGoodByeTopicV1: new(primitives.SSZUint64),

		// DataColumnSidecarsByRoot v1 Message
		RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
}
		// RPC Block By Range Message
		RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
		RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),

// Maps all registered protocol prefixes.
var protocolMapping = map[string]bool{
	protocolPrefix: true,
}
		// RPC Block By Root Message
		RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
		RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),

// Maps all the protocol message names for the different rpc
// topics.
var messageMapping = map[string]bool{
	StatusMessageName:               true,
	GoodbyeMessageName:              true,
	BeaconBlocksByRangeMessageName:  true,
	BeaconBlocksByRootsMessageName:  true,
	PingMessageName:                 true,
	MetadataMessageName:             true,
	BlobSidecarsByRangeName:         true,
	BlobSidecarsByRootName:          true,
	LightClientBootstrapName:        true,
	LightClientUpdatesByRangeName:   true,
	LightClientFinalityUpdateName:   true,
	LightClientOptimisticUpdateName: true,
	DataColumnSidecarsByRootName:    true,
}
		// RPC Ping Message
		RPCPingTopicV1: new(primitives.SSZUint64),

// Maps all the RPC messages which are to be updated in altair.
var altairMapping = map[string]bool{
	BeaconBlocksByRangeMessageName: true,
	BeaconBlocksByRootsMessageName: true,
	MetadataMessageName:            true,
}
		// RPC Metadata Message
		RPCMetaDataTopicV1: new(interface{}),
		RPCMetaDataTopicV2: new(interface{}),
		RPCMetaDataTopicV3: new(interface{}),

// Maps all the RPC messages which are to be updated in fulu.
var fuluMapping = map[string]bool{
	MetadataMessageName: true,
}
		// BlobSidecarsByRange v1 Message
		RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),

var versionMapping = map[string]bool{
	SchemaVersionV1: true,
	SchemaVersionV2: true,
	SchemaVersionV3: true,
}
		// BlobSidecarsByRoot v1 Message
		RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),

// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
// was introduced. For other RPC methods, context bytes are always required.
var OmitContextBytesV1 = map[string]bool{
	StatusMessageName:              true,
	GoodbyeMessageName:             true,
	BeaconBlocksByRangeMessageName: true,
	BeaconBlocksByRootsMessageName: true,
	PingMessageName:                true,
	MetadataMessageName:            true,
}
		// Light client
		RPCLightClientBootstrapTopicV1:        new([fieldparams.RootLength]byte),
		RPCLightClientUpdatesByRangeTopicV1:   new(pb.LightClientUpdatesByRangeRequest),
		RPCLightClientFinalityUpdateTopicV1:   new(interface{}),
		RPCLightClientOptimisticUpdateTopicV1: new(interface{}),

		// DataColumnSidecarsByRange v1 Message
		RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),

		// DataColumnSidecarsByRoot v1 Message
		RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
	}

	// Maps all registered protocol prefixes.
	protocolMapping = map[string]bool{
		protocolPrefix: true,
	}

	// Maps all the protocol message names for the different rpc topics.
	messageMapping = map[string]bool{
		StatusMessageName:               true,
		GoodbyeMessageName:              true,
		BeaconBlocksByRangeMessageName:  true,
		BeaconBlocksByRootsMessageName:  true,
		PingMessageName:                 true,
		MetadataMessageName:             true,
		BlobSidecarsByRangeName:         true,
		BlobSidecarsByRootName:          true,
		LightClientBootstrapName:        true,
		LightClientUpdatesByRangeName:   true,
		LightClientFinalityUpdateName:   true,
		LightClientOptimisticUpdateName: true,
		DataColumnSidecarsByRootName:    true,
		DataColumnSidecarsByRangeName:   true,
	}

	// Maps all the RPC messages which are to be updated in altair.
	altairMapping = map[string]string{
		BeaconBlocksByRangeMessageName: SchemaVersionV2,
		BeaconBlocksByRootsMessageName: SchemaVersionV2,
		MetadataMessageName:            SchemaVersionV2,
	}

	// Maps all the RPC messages which are to be updated in fulu.
	fuluMapping = map[string]string{
		MetadataMessageName: SchemaVersionV3,
	}

	versionMapping = map[string]bool{
		SchemaVersionV1: true,
		SchemaVersionV2: true,
		SchemaVersionV3: true,
	}

	// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
	// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
	// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
	// was introduced. For other RPC methods, context bytes are always required.
	OmitContextBytesV1 = map[string]bool{
		StatusMessageName:              true,
		GoodbyeMessageName:             true,
		BeaconBlocksByRangeMessageName: true,
		BeaconBlocksByRootsMessageName: true,
		PingMessageName:                true,
		MetadataMessageName:            true,
	}
)
// VerifyTopicMapping verifies that the topic and its accompanying
// message type are correct.
@@ -327,13 +344,17 @@ func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
	beaconConfig := params.BeaconConfig()

	// Check if the message is to be updated in fulu.
	if epoch >= beaconConfig.FuluForkEpoch && fuluMapping[msg] {
		return protocolPrefix + msg + SchemaVersionV3, nil
	if epoch >= beaconConfig.FuluForkEpoch {
		if version, ok := fuluMapping[msg]; ok {
			return protocolPrefix + msg + version, nil
		}
	}

	// Check if the message is to be updated in altair.
	if epoch >= beaconConfig.AltairForkEpoch && altairMapping[msg] {
		return protocolPrefix + msg + SchemaVersionV2, nil
	if epoch >= beaconConfig.AltairForkEpoch {
		if version, ok := altairMapping[msg]; ok {
			return protocolPrefix + msg + version, nil
		}
	}

	return protocolPrefix + msg + SchemaVersionV1, nil
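TopicFromMessage above now resolves topics newest-fork-first: if the epoch is past a fork and that fork's mapping lists the message, the mapped schema version wins; otherwise the lookup falls through, ending at v1. A standalone sketch of the same resolution order (the fork epochs below are hypothetical placeholders):

package main

import "fmt"

const prefix = "/eth2/beacon_chain/req"

var (
	altair = map[string]string{"/metadata": "/2", "/beacon_blocks_by_range": "/2"}
	fulu   = map[string]string{"/metadata": "/3"}
)

// topicFor mirrors the fork-ordered lookup in TopicFromMessage: check the
// newest fork's mapping first, then fall back fork by fork, ending at v1.
func topicFor(msg string, epoch, altairEpoch, fuluEpoch uint64) string {
	if epoch >= fuluEpoch {
		if v, ok := fulu[msg]; ok {
			return prefix + msg + v
		}
	}
	if epoch >= altairEpoch {
		if v, ok := altair[msg]; ok {
			return prefix + msg + v
		}
	}
	return prefix + msg + "/1"
}

func main() {
	fmt.Println(topicFor("/goodbye", 900000, 74240, 411392))  // .../goodbye/1
	fmt.Println(topicFor("/metadata", 100000, 74240, 411392)) // .../metadata/2
	fmt.Println(topicFor("/metadata", 900000, 74240, 411392)) // .../metadata/3
}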
@@ -119,50 +119,31 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
	})

	t.Run("after altair fork but before fulu fork", func(t *testing.T) {
		for m := range messageMapping {
			topic, err := TopicFromMessage(m, altairForkEpoch)
			require.NoError(t, err)
		// Not modified in altair fork.
		topic, err := TopicFromMessage(GoodbyeMessageName, altairForkEpoch)
		require.NoError(t, err)
		require.Equal(t, "/eth2/beacon_chain/req/goodbye/1", topic)

			if altairMapping[m] {
				require.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
				_, _, version, err := TopicDeconstructor(topic)
				require.NoError(t, err)
				require.Equal(t, SchemaVersionV2, version)
				continue
			}

			require.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
			_, _, version, err := TopicDeconstructor(topic)
			require.NoError(t, err)
			require.Equal(t, SchemaVersionV1, version)
		}
		// Modified in altair fork.
		topic, err = TopicFromMessage(MetadataMessageName, altairForkEpoch)
		require.NoError(t, err)
		require.Equal(t, "/eth2/beacon_chain/req/metadata/2", topic)
	})

	t.Run("after fulu fork", func(t *testing.T) {
		for m := range messageMapping {
			topic, err := TopicFromMessage(m, fuluForkEpoch)
			require.NoError(t, err)
		// Not modified in any fork.
		topic, err := TopicFromMessage(GoodbyeMessageName, fuluForkEpoch)
		require.NoError(t, err)
		require.Equal(t, "/eth2/beacon_chain/req/goodbye/1", topic)

			if fuluMapping[m] {
				require.Equal(t, true, strings.Contains(topic, SchemaVersionV3))
				_, _, version, err := TopicDeconstructor(topic)
				require.NoError(t, err)
				require.Equal(t, SchemaVersionV3, version)
				continue
			}
		// Modified in altair fork.
		topic, err = TopicFromMessage(BeaconBlocksByRangeMessageName, fuluForkEpoch)
		require.NoError(t, err)
		require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)

			if altairMapping[m] {
				require.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
				_, _, version, err := TopicDeconstructor(topic)
				require.NoError(t, err)
				require.Equal(t, SchemaVersionV2, version)
				continue
			}

			require.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
			_, _, version, err := TopicDeconstructor(topic)
			require.NoError(t, err)
			require.Equal(t, SchemaVersionV1, version)
		}
		// Modified both in altair and fulu fork.
		topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
		require.NoError(t, err)
		require.Equal(t, "/eth2/beacon_chain/req/metadata/3", topic)
	})
}
@@ -213,7 +213,7 @@ func (s BlobSidecarsByRootReq) Len() int {
// ====================================
// DataColumnsByRootIdentifiers section
// ====================================
var _ ssz.Marshaler = (*DataColumnsByRootIdentifiers)(nil)
var _ ssz.Marshaler = DataColumnsByRootIdentifiers{}
var _ ssz.Unmarshaler = (*DataColumnsByRootIdentifiers)(nil)

// DataColumnsByRootIdentifiers is used to specify a list of data column targets (root+index) in a DataColumnSidecarsByRoot RPC request.
@@ -275,33 +275,33 @@ func (d *DataColumnsByRootIdentifiers) UnmarshalSSZ(buf []byte) error {
	return nil
}

func (d *DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
func (d DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
	var err error
	count := len(*d)
	count := len(d)
	maxSize := params.BeaconConfig().MaxRequestBlocksDeneb
	if uint64(count) > maxSize {
		return nil, errors.Errorf("data column identifiers list exceeds max size: %d > %d", count, maxSize)
	}

	if len(*d) == 0 {
	if len(d) == 0 {
		return []byte{}, nil
	}
	sizes := make([]uint32, count)
	valTotal := uint32(0)
	for i, elem := range *d {
	for i, elem := range d {
		if elem == nil {
			return nil, errors.New("nil item in DataColumnsByRootIdentifiers list")
		}
		sizes[i] = uint32(elem.SizeSSZ())
		valTotal += sizes[i]
	}
	offSize := uint32(4 * len(*d))
	offSize := uint32(4 * len(d))
	out := make([]byte, offSize, offSize+valTotal)
	for i := range sizes {
		binary.LittleEndian.PutUint32(out[i*4:i*4+4], offSize)
		offSize += sizes[i]
	}
	for _, elem := range *d {
	for _, elem := range d {
		out, err = elem.MarshalSSZTo(out)
		if err != nil {
			return nil, err
@@ -312,7 +312,7 @@ func (d *DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
}

// MarshalSSZTo implements ssz.Marshaler. It appends the serialized DataColumnsByRootIdentifiers value to the provided byte slice.
func (d *DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
func (d DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
	obj, err := d.MarshalSSZ()
	if err != nil {
		return nil, err
@@ -321,11 +321,11 @@ func (d *DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error)
}

// SizeSSZ implements ssz.Marshaler. It returns the size of the serialized representation.
func (d *DataColumnsByRootIdentifiers) SizeSSZ() int {
func (d DataColumnsByRootIdentifiers) SizeSSZ() int {
	size := 0
	for i := 0; i < len(*d); i++ {
	for i := 0; i < len(d); i++ {
		size += 4
		size += (*d)[i].SizeSSZ()
		size += (d)[i].SizeSSZ()
	}
	return size
}
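MarshalSSZ above serializes a variable-size SSZ list as a table of 4-byte little-endian offsets followed by the element payloads, with each offset measured from the start of the encoding. A minimal sketch of that layout for raw byte-slice elements:

package main

import (
	"encoding/binary"
	"fmt"
)

// marshalList lays elements out the way the MarshalSSZ above does:
// a 4-byte offset per element first, then the element bytes themselves.
func marshalList(elems [][]byte) []byte {
	offset := uint32(4 * len(elems)) // first element starts after the offset table
	out := make([]byte, 0)
	for _, e := range elems {
		var buf [4]byte
		binary.LittleEndian.PutUint32(buf[:], offset)
		out = append(out, buf[:]...)
		offset += uint32(len(e))
	}
	for _, e := range elems {
		out = append(out, e...)
	}
	return out
}

func main() {
	// Two elements of 2 and 3 bytes: offsets 8 and 10, then the payloads.
	fmt.Println(marshalList([][]byte{{0xaa, 0xbb}, {0x01, 0x02, 0x03}}))
	// Output: [8 0 0 0 10 0 0 0 170 187 1 2 3]
}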
@@ -42,9 +42,9 @@ go_test(
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",

File diff suppressed because it is too large

@@ -10,11 +10,13 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -36,7 +38,9 @@ go_test(
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
@@ -45,6 +49,7 @@ go_test(
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/state/stategen/mock:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -3,12 +3,15 @@ package lookup

import (
	"context"
	"fmt"
	"math"
	"strconv"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -49,6 +52,7 @@ type BeaconDbBlocker struct {
	ChainInfoFetcher   blockchain.ChainInfoFetcher
	GenesisTimeFetcher blockchain.TimeFetcher
	BlobStorage        *filesystem.BlobStorage
	DataColumnStorage  *filesystem.DataColumnStorage
}

// Block returns the beacon block for a given identifier. The identifier can be one of:
@@ -212,64 +216,190 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (

	root := bytesutil.ToBytes32(rootSlice)

	b, err := p.BeaconDB.Block(ctx, root)
	roSignedBlock, err := p.BeaconDB.Block(ctx, root)
	if err != nil {
		return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice), Reason: core.Internal}
	}
	if b == nil {

	if roSignedBlock == nil {
		return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
	}

	// if block is not in the retention window, return 200 w/ empty list
	if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
	// If block is not in the retention window, return 200 w/ empty list
	if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(roSignedBlock.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
		return make([]*blocks.VerifiedROBlob, 0), nil
	}

	commitments, err := b.Block().Body().BlobKzgCommitments()
	roBlock := roSignedBlock.Block()

	commitments, err := roBlock.Body().BlobKzgCommitments()
	if err != nil {
		return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
	}
	// if there are no commitments return 200 w/ empty list

	// If there are no commitments return 200 w/ empty list
	if len(commitments) == 0 {
		return make([]*blocks.VerifiedROBlob, 0), nil
	}

	sum := p.BlobStorage.Summary(root)
	// Compute the first Fulu slot.
	fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
	fuluForkSlot := primitives.Slot(math.MaxUint64)
	if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
		fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
		if err != nil {
			return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate peerDAS start slot"), Reason: core.Internal}
		}
	}

	if len(indices) == 0 {
		for i := range commitments {
			if sum.HasIndex(uint64(i)) {
				indices = append(indices, i)
	if roBlock.Slot() >= fuluForkSlot {
		roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root)
		if err != nil {
			return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to create roBlock with root %#x", root), Reason: core.Internal}
		}

		return p.blobsFromStoredDataColumns(roBlock, indices)
	}

	return p.blobsFromStoredBlobs(commitments, root, indices)
}

// blobsFromStoredBlobs retrieves blob sidecars corresponding to `indices` and `root` from the store.
// This function expects blob sidecars to be stored (aka. no data column sidecars).
func (p *BeaconDbBlocker) blobsFromStoredBlobs(commitments [][]byte, root [fieldparams.RootLength]byte, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
	summary := p.BlobStorage.Summary(root)
	maxBlobCount := summary.MaxBlobsForEpoch()

	for _, index := range indices {
		if uint64(index) >= maxBlobCount {
			return nil, &core.RpcError{
				Err:    fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", index, maxBlobCount),
				Reason: core.BadRequest,
			}
		}
	} else {
		for _, ix := range indices {
			if uint64(ix) >= sum.MaxBlobsForEpoch() {
				return nil, &core.RpcError{
					Err:    fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", ix, sum.MaxBlobsForEpoch()),
					Reason: core.BadRequest,
				}
			}
			if !sum.HasIndex(uint64(ix)) {
				return nil, &core.RpcError{
					Err:    fmt.Errorf("requested index %d not found", ix),
					Reason: core.NotFound,
				}

		if !summary.HasIndex(uint64(index)) {
			return nil, &core.RpcError{
				Err:    fmt.Errorf("requested index %d not found", index),
				Reason: core.NotFound,
			}
		}
	}

	blobs := make([]*blocks.VerifiedROBlob, len(indices))
	for i, index := range indices {
		vblob, err := p.BlobStorage.Get(root, uint64(index))
	// If no indices are provided, use all indices that are available in the summary.
	if len(indices) == 0 {
		for index := range commitments {
			if summary.HasIndex(uint64(index)) {
				indices = append(indices, index)
			}
		}
	}

	// Retrieve blob sidecars from the store.
	blobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
	for _, index := range indices {
		blobSidecar, err := p.BlobStorage.Get(root, uint64(index))
		if err != nil {
			return nil, &core.RpcError{
				Err:    fmt.Errorf("could not retrieve blob for block root %#x at index %d", rootSlice, index),
				Err:    fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index),
				Reason: core.Internal,
			}
		}
		blobs[i] = &vblob

		blobs = append(blobs, &blobSidecar)
	}

	return blobs, nil
}

// blobsFromStoredDataColumns retrieves data column sidecars from the store,
// reconstructs the whole matrix if needed, converts the matrix to blobs,
// and then returns converted blobs corresponding to `indices` and `root`.
// This function expects data column sidecars to be stored (aka. no blob sidecars).
// If not enough data column sidecars are available to convert blobs from them
// (either directly or after reconstruction), an error is returned.
func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
	root := block.Root()

	// Use all indices if none are provided.
	if len(indices) == 0 {
		commitments, err := block.Block().Body().BlobKzgCommitments()
		if err != nil {
			return nil, &core.RpcError{
				Err:    errors.Wrap(err, "could not retrieve blob commitments"),
				Reason: core.Internal,
			}
		}

		for index := range commitments {
			indices = append(indices, index)
		}
	}

	// Count how many columns we have in the store.
	summary := p.DataColumnStorage.Summary(root)
	stored := summary.Stored()
	count := uint64(len(stored))

	if count < peerdas.MinimumColumnsCountToReconstruct() {
		// There is no way to reconstruct the data columns.
		return nil, &core.RpcError{
			Err:    errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if it is already the case", flags.SubscribeAllDataSubnets.Name),
			Reason: core.NotFound,
		}
	}

	// Retrieve the needed data columns from the database.
	verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored)
	if err != nil {
		return nil, &core.RpcError{
			Err:    errors.Wrap(err, "needed data column sidecars"),
			Reason: core.Internal,
		}
	}

	// Reconstruct blob sidecars from data column sidecars.
	verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices)
	if err != nil {
		return nil, &core.RpcError{
			Err:    errors.Wrap(err, "blobs from data columns"),
			Reason: core.Internal,
		}
	}

	return verifiedRoBlobSidecars, nil
}

// neededDataColumnSidecars retrieves all data column sidecars corresponding to (non-extended) blobs if available,
// else retrieves all data column sidecars from the store.
func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]byte, stored map[uint64]bool) ([]blocks.VerifiedRODataColumn, error) {
	// Check if we have all the non-extended data columns.
	cellsPerBlob := fieldparams.CellsPerBlob
	blobIndices := make([]uint64, 0, cellsPerBlob)
	hasAllBlobColumns := true
	for i := range uint64(cellsPerBlob) {
		if !stored[i] {
			hasAllBlobColumns = false
			break
		}
		blobIndices = append(blobIndices, i)
	}

	if hasAllBlobColumns {
		// Retrieve only the non-extended data columns.
		verifiedRoSidecars, err := p.DataColumnStorage.Get(root, blobIndices)
		if err != nil {
			return nil, errors.Wrap(err, "data columns storage get")
		}

		return verifiedRoSidecars, nil
	}

	// Retrieve all the data columns.
	verifiedRoSidecars, err := p.DataColumnStorage.Get(root, nil)
	if err != nil {
		return nil, errors.Wrap(err, "data columns storage get")
	}

	return verifiedRoSidecars, nil
}
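The retrieval path above turns on a single comparison: blocks at or past the first Fulu slot serve blobs out of data-column storage (reconstructing if needed), while earlier blocks read blob sidecars directly. A toy version of just that dispatch (slotsPerEpoch and the fork epoch stand in for the real params.BeaconConfig() values):

package main

import "fmt"

const slotsPerEpoch = 32 // stand-in for params.BeaconConfig().SlotsPerEpoch

// blobSource mirrors the branch in Blobs above.
func blobSource(blockSlot, fuluForkEpoch uint64) string {
	fuluForkSlot := fuluForkEpoch * slotsPerEpoch
	if blockSlot >= fuluForkSlot {
		return "data column sidecars (convert, reconstructing first if needed)"
	}
	return "blob sidecars (read directly)"
}

func main() {
	fmt.Println(blobSource(40, 2)) // blob sidecars (read directly)
	fmt.Println(blobSource(64, 2)) // data column sidecars (convert, reconstructing first if needed)
}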
@@ -8,12 +8,15 @@ import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -158,172 +161,335 @@ func TestGetBlock(t *testing.T) {
}

func TestGetBlob(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.DenebForkEpoch = 1
	params.OverrideBeaconConfig(cfg)
	const (
		slot           = 123
		blobCount      = 4
		denebForkEpoch = 1
		fuluForkEpoch  = 2
	)

	setupDeneb := func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.DenebForkEpoch = denebForkEpoch
		params.OverrideBeaconConfig(cfg)
	}

	setupFulu := func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.DenebForkEpoch = denebForkEpoch
		cfg.FuluForkEpoch = fuluForkEpoch
		params.OverrideBeaconConfig(cfg)
	}

	ctx := t.Context()
	db := testDB.SetupDB(t)
	denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
	require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
	_, bs := filesystem.NewEphemeralBlobStorageAndFs(t)
	testSidecars := verification.FakeVerifySliceForTest(t, blobs)
	for i := range testSidecars {
		require.NoError(t, bs.Save(testSidecars[i]))

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	// Create and save Deneb block and blob sidecars.
	_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)

	denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
	denebBlockRoot := denebBlock.Root()

	verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
	for i := range verifiedStoredSidecars {
		err := blobStorage.Save(verifiedStoredSidecars[i])
		require.NoError(t, err)
	}
	blockRoot := blobs[0].BlockRoot()

	err = db.SaveBlock(t.Context(), denebBlock)
	require.NoError(t, err)

	// Create Electra block and blob sidecars (Electra block = Fulu block),
	// convert the blob sidecars to data column sidecars, and save the block.
	fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
	fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
	fuluBlockRoot := fuluBlock.Root()

	cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
	for _, blob := range fuluBlobSidecars {
		var kzgBlob kzg.Blob
		copy(kzgBlob[:], blob.Blob)
		cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
		require.NoError(t, err)
		cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
	}

	dataColumnSidecarPb, err := peerdas.DataColumnSidecars(fuluBlock, cellsAndProofsList)
	require.NoError(t, err)

	verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
	for _, sidecarPb := range dataColumnSidecarPb {
		roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecarPb, fuluBlockRoot)
		require.NoError(t, err)

		verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
	}

	err = db.SaveBlock(t.Context(), fuluBlock)
	require.NoError(t, err)

	t.Run("genesis", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{}
		_, rpcErr := blocker.Blobs(ctx, "genesis", nil)
		assert.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
		assert.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
		require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
		require.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
	})

	t.Run("head", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:]},
			ChainInfoFetcher: &mockChain.ChainService{Root: denebBlockRoot[:]},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}

		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(retrievedVerifiedSidecars))

		for i := range blobCount {
			expected := verifiedStoredSidecars[i]

			actual := retrievedVerifiedSidecars[i].BlobSidecar
			require.NotNil(t, actual)

			require.Equal(t, expected.Index, actual.Index)
			require.DeepEqual(t, expected.Blob, actual.Blob)
			require.DeepEqual(t, expected.KzgCommitment, actual.KzgCommitment)
			require.DeepEqual(t, expected.KzgProof, actual.KzgProof)
		}
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "head", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
		sidecar := verifiedBlobs[0].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(0), sidecar.Index)
		assert.DeepEqual(t, blobs[0].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[0].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[0].KzgProof, sidecar.KzgProof)
		sidecar = verifiedBlobs[1].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(1), sidecar.Index)
		assert.DeepEqual(t, blobs[1].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[1].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[1].KzgProof, sidecar.KzgProof)
		sidecar = verifiedBlobs[2].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(2), sidecar.Index)
		assert.DeepEqual(t, blobs[2].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[2].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[2].KzgProof, sidecar.KzgProof)
		sidecar = verifiedBlobs[3].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(3), sidecar.Index)
		assert.DeepEqual(t, blobs[3].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[3].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[3].KzgProof, sidecar.KzgProof)
	})

	t.Run("finalized", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedSidecars))
	})

	t.Run("justified", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "justified", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedSidecars))
	})

	t.Run("root", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(blockRoot[:]), nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]), nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedBlobs))
	})

	t.Run("slot", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedBlobs))
	})

	t.Run("one blob only", func(t *testing.T) {
		const index = 2

		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []int{2})
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 1, len(verifiedBlobs))
		sidecar := verifiedBlobs[0].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(2), sidecar.Index)
		assert.DeepEqual(t, blobs[2].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[2].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[2].KzgProof, sidecar.KzgProof)

		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", []int{index})
		require.IsNil(t, rpcErr)
		require.Equal(t, 1, len(retrievedVerifiedSidecars))

		expected := verifiedStoredSidecars[index]
		actual := retrievedVerifiedSidecars[0].BlobSidecar
		require.NotNil(t, actual)

		require.Equal(t, uint64(index), actual.Index)
		require.DeepEqual(t, expected.Blob, actual.Blob)
		require.DeepEqual(t, expected.KzgCommitment, actual.KzgCommitment)
		require.DeepEqual(t, expected.KzgProof, actual.KzgProof)
	})

	t.Run("no blobs returns an empty array", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: filesystem.NewEphemeralBlobStorage(t),
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.IsNil(t, rpcErr)
		require.Equal(t, 0, len(verifiedBlobs))
	})

	t.Run("no blob at index", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}
		noBlobIndex := len(blobs) + 1

		noBlobIndex := len(storedBlobSidecars) + 1
		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
		require.NotNil(t, rpcErr)
		assert.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
	})

	t.Run("index too big", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: bs,
			BlobStorage: blobStorage,
		}
		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
		require.NotNil(t, rpcErr)
		assert.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
		require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
	})

	t.Run("not enough stored data column sidecars", func(t *testing.T) {
		setupFulu(t)

		_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
		require.NoError(t, err)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:          db,
			BlobStorage:       blobStorage,
			DataColumnStorage: dataColumnStorage,
		}

		_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
		require.NotNil(t, rpcErr)
		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
	})

	t.Run("reconstruction needed", func(t *testing.T) {
		setupFulu(t)

		_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnsCountToReconstruct()+1])
		require.NoError(t, err)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:          db,
			BlobStorage:       blobStorage,
			DataColumnStorage: dataColumnStorage,
		}

		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))

		for i, retrievedVerifiedRoBlob := range retrievedVerifiedRoBlobs {
			retrievedBlobSidecarPb := retrievedVerifiedRoBlob.BlobSidecar
			initialBlobSidecarPb := fuluBlobSidecars[i].BlobSidecar
			require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
		}
	})

	t.Run("no reconstruction needed", func(t *testing.T) {
		setupFulu(t)

		_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
		require.NoError(t, err)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:          db,
			BlobStorage:       blobStorage,
			DataColumnStorage: dataColumnStorage,
		}

		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))

		for i, retrievedVerifiedRoBlob := range retrievedVerifiedRoBlobs {
			retrievedBlobSidecarPb := retrievedVerifiedRoBlob.BlobSidecar
			initialBlobSidecarPb := fuluBlobSidecars[i].BlobSidecar
			require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
		}
	})
}
@@ -8,13 +8,18 @@ import (
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
+	"github.com/pkg/errors"
	"google.golang.org/protobuf/proto"
)

// constructGenericBeaconBlock constructs a `GenericBeaconBlock` based on the block version and other parameters.
-func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock, blobsBundle *enginev1.BlobsBundle, winningBid primitives.Wei) (*ethpb.GenericBeaconBlock, error) {
+func (vs *Server) constructGenericBeaconBlock(
+	sBlk interfaces.SignedBeaconBlock,
+	blobsBundler enginev1.BlobsBundler,
+	winningBid primitives.Wei,
+) (*ethpb.GenericBeaconBlock, error) {
	if sBlk == nil || sBlk.Block() == nil {
-		return nil, fmt.Errorf("block cannot be nil")
+		return nil, errors.New("block cannot be nil")
	}

	blockProto, err := sBlk.Block().Proto()
@@ -35,11 +40,23 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
	case version.Capella:
		return vs.constructCapellaBlock(blockProto, isBlinded, bidStr), nil
	case version.Deneb:
-		return vs.constructDenebBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
+		bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
+		if blobsBundler != nil && !ok {
+			return nil, fmt.Errorf("expected *BlobsBundle, got %T", blobsBundler)
+		}
+		return vs.constructDenebBlock(blockProto, isBlinded, bidStr, bundle), nil
	case version.Electra:
-		return vs.constructElectraBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
+		bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
+		if blobsBundler != nil && !ok {
+			return nil, fmt.Errorf("expected *BlobsBundle, got %T", blobsBundler)
+		}
+		return vs.constructElectraBlock(blockProto, isBlinded, bidStr, bundle), nil
	case version.Fulu:
-		return vs.constructFuluBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
+		bundle, ok := blobsBundler.(*enginev1.BlobsBundleV2)
+		if blobsBundler != nil && !ok {
+			return nil, fmt.Errorf("expected *BlobsBundleV2, got %T", blobsBundler)
+		}
+		return vs.constructFuluBlock(blockProto, isBlinded, bidStr, bundle), nil
	default:
		return nil, fmt.Errorf("unknown block version: %d", sBlk.Version())
	}
@@ -92,7 +109,7 @@ func (vs *Server) constructElectraBlock(blockProto proto.Message, isBlinded bool
	return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Electra{Electra: electraContents}, IsBlinded: false, PayloadValue: payloadValue}
}

-func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
+func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundleV2) *ethpb.GenericBeaconBlock {
	if isBlinded {
		return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blockProto.(*ethpb.BlindedBeaconBlockFulu)}, IsBlinded: true, PayloadValue: payloadValue}
	}

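The hunks above replace the concrete *enginev1.BlobsBundle parameter with an enginev1.BlobsBundler interface, so the same signature can carry a *BlobsBundle (Deneb, Electra) or a *BlobsBundleV2 (Fulu). The interface definition itself is not part of this diff; a minimal sketch of what it plausibly looks like, inferred from the GetKzgCommitments() call made in setLocalExecution further down:

// Hypothetical sketch; the real definition lives in proto/engine/v1.
// Both generated protobuf types already expose GetKzgCommitments() [][]byte,
// so *BlobsBundle and *BlobsBundleV2 satisfy this interface for free.
type BlobsBundler interface {
	GetKzgCommitments() [][]byte
}
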
@@ -261,6 +261,10 @@ func (vs *Server) buildValidatorDuty(
}

func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {
	if duty == nil || la == nil {
		// Should never happen: callers are expected to set both arguments.
		return
	}
	duty.CommitteeLength = la.CommitteeLength
	duty.CommitteeIndex = la.CommitteeIndex
	duty.ValidatorCommitteeIndex = la.ValidatorCommitteeIndex

@@ -232,7 +232,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
	}()

	winningBid := primitives.ZeroWei()
-	var bundle *enginev1.BlobsBundle
+	var bundle enginev1.BlobsBundler
	if sBlk.Version() >= version.Bellatrix {
		local, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
		if err != nil {

@@ -54,7 +54,7 @@ const blockBuilderTimeout = 1 * time.Second
const gasLimitAdjustmentFactor = 1024

// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
-func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse, bid builder.Bid, builderBoostFactor primitives.Gwei) (primitives.Wei, *enginev1.BlobsBundle, error) {
+func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse, bid builder.Bid, builderBoostFactor primitives.Gwei) (primitives.Wei, enginev1.BlobsBundler, error) {
	_, span := trace.StartSpan(ctx, "ProposerServer.setExecutionData")
	defer span.End()

@@ -69,13 +69,13 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc

	// Use local payload if builder payload is nil.
	if bid == nil {
-		return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+		return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
	}

	builderPayload, err := bid.Header()
	if err != nil {
		log.WithError(err).Warn("Proposer: failed to retrieve header from BuilderBid")
-		return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+		return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
	}

	switch {
@@ -84,7 +84,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
		if err != nil {
			tracing.AnnotateError(span, err)
			log.WithError(err).Warn("Proposer: failed to match withdrawals root")
-			return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
		}

		// Compare payload values between local and builder. Default to the local value if it is higher.
@@ -97,7 +97,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
				"minBuilderBid":    minBid,
				"builderGweiValue": builderValueGwei,
			}).Warn("Proposer: using local execution payload because min bid not attained")
-			return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
		}

		// Use local block if min difference is not attained
@@ -108,7 +108,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
				"minBidDiff":       minDiff,
				"builderGweiValue": builderValueGwei,
			}).Warn("Proposer: using local execution payload because min difference with local value was not attained")
-			return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
		}

		// Use builder payload if the following is true:
@@ -133,7 +133,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
			bidDeneb, ok := bid.(builder.BidDeneb)
			if !ok {
				log.Warnf("bid type %T does not implement builder.BidDeneb", bid)
-				return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+				return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
			} else {
				builderKzgCommitments = bidDeneb.BlobKzgCommitments()
			}
@@ -144,14 +144,14 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
			bidElectra, ok := bid.(builder.BidElectra)
			if !ok {
				log.Warnf("bid type %T does not implement builder.BidElectra", bid)
-				return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+				return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
			} else {
				executionRequests = bidElectra.ExecutionRequests()
			}
		}
		if err := setBuilderExecution(blk, builderPayload, builderKzgCommitments, executionRequests); err != nil {
			log.WithError(err).Warn("Proposer: failed to set builder payload")
-			return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
		} else {
			return bid.Value(), nil, nil
		}
@@ -171,11 +171,11 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
			trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)),     // lint:ignore uintcast -- This is OK for tracing.
			trace.Int64Attribute("builderBoostFactor", int64(builderBoostFactor)), // lint:ignore uintcast -- This is OK for tracing.
		)
-		return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+		return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
	default: // Bellatrix case.
		if err := setBuilderExecution(blk, builderPayload, nil, nil); err != nil {
			log.WithError(err).Warn("Proposer: failed to set builder payload")
-			return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
+			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
		} else {
			return bid.Value(), nil, nil
		}
@@ -375,8 +375,8 @@ func matchingWithdrawalsRoot(local, builder interfaces.ExecutionData) (bool, err
// It delegates to setExecution for the actual work.
func setLocalExecution(blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse) error {
	var kzgCommitments [][]byte
-	if local.BlobsBundle != nil {
-		kzgCommitments = local.BlobsBundle.KzgCommitments
+	if local.BlobsBundler != nil {
+		kzgCommitments = local.BlobsBundler.GetKzgCommitments()
	}
	if local.ExecutionRequests != nil {
		if err := blk.SetExecutionRequests(local.ExecutionRequests); err != nil {

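Every local-fallback path above now returns local.BlobsBundler unchanged, and setLocalExecution reads commitments through the interface, so callers never need the concrete bundle type. A minimal caller-side sketch (hypothetical helper, not part of this diff):

// commitmentCount works identically for Deneb/Electra bundles and Fulu V2 bundles.
func commitmentCount(b enginev1.BlobsBundler) int {
	if b == nil {
		return 0 // pre-Deneb payloads carry no blobs
	}
	return len(b.GetKzgCommitments())
}
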
@@ -519,7 +519,7 @@ func TestServer_setExecutionData(t *testing.T) {
		PayloadIDBytes: id,
		GetPayloadResponse: &blocks.GetPayloadResponse{
			ExecutionData: ed,
-			BlobsBundle:   blobsBundle,
+			BlobsBundler:  blobsBundle,
			Bid:           primitives.ZeroWei(),
		},
	}
@@ -527,7 +527,7 @@ func TestServer_setExecutionData(t *testing.T) {
		res, err := vs.getLocalPayload(ctx, blk.Block(), capellaTransitionState)
		require.NoError(t, err)
		require.Equal(t, uint64(4), res.ExecutionData.BlockNumber())
-		require.DeepEqual(t, res.BlobsBundle, blobsBundle)
+		require.DeepEqual(t, res.BlobsBundler, blobsBundle)
	})
	t.Run("Can get builder payload and blobs in Deneb", func(t *testing.T) {
		cfg := params.BeaconConfig().Copy()

@@ -529,7 +529,7 @@ func TestServer_GetBeaconBlock_Deneb(t *testing.T) {
		PayloadIDBytes: &enginev1.PayloadIDBytes{1},
		GetPayloadResponse: &blocks.GetPayloadResponse{
			ExecutionData: ed,
-			BlobsBundle:   bundle,
+			BlobsBundler:  bundle,
		},
	}

@@ -7,6 +7,7 @@ go_library(
        "block_batcher.go",
        "broadcast_bls_changes.go",
        "context.go",
+       "data_columns_reconstruct.go",
        "deadlines.go",
        "decode_pubsub.go",
        "doc.go",
@@ -25,6 +26,7 @@ go_library(
        "rpc_blob_sidecars_by_range.go",
        "rpc_blob_sidecars_by_root.go",
        "rpc_chunked_response.go",
+       "rpc_data_column_sidecars_by_range.go",
        "rpc_data_column_sidecars_by_root.go",
        "rpc_goodbye.go",
        "rpc_light_client.go",
@@ -39,6 +41,7 @@ go_library(
        "subscriber_beacon_blocks.go",
        "subscriber_blob_sidecar.go",
        "subscriber_bls_to_execution_change.go",
+       "subscriber_data_column_sidecar.go",
        "subscriber_handlers.go",
        "subscriber_light_client.go",
        "subscriber_sync_committee_message.go",
@@ -77,6 +80,7 @@ go_library(
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
+       "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/core/transition/interop:go_default_library",
@@ -161,6 +165,7 @@ go_test(
        "block_batcher_test.go",
        "broadcast_bls_changes_test.go",
        "context_test.go",
+       "data_columns_reconstruct_test.go",
        "decode_pubsub_test.go",
        "error_test.go",
        "fork_watcher_test.go",
@@ -171,6 +176,7 @@ go_test(
        "rpc_beacon_blocks_by_root_test.go",
        "rpc_blob_sidecars_by_range_test.go",
        "rpc_blob_sidecars_by_root_test.go",
+       "rpc_data_column_sidecars_by_range_test.go",
        "rpc_data_column_sidecars_by_root_test.go",
        "rpc_goodbye_test.go",
        "rpc_handler_test.go",
@@ -205,6 +211,7 @@ go_test(
    deps = [
        "//async/abool:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
+       "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
@@ -212,6 +219,7 @@ go_test(
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
+       "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -282,6 +290,7 @@ go_test(
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
+       "@com_github_stretchr_testify//require:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

@@ -13,8 +13,6 @@ import (

const signatureVerificationInterval = 50 * time.Millisecond

-const verifierLimit = 50
-
type signatureVerifier struct {
	set     *bls.SignatureBatch
	resChan chan error
@@ -36,7 +34,7 @@ func (s *Service) verifierRoutine() {
			return
		case sig := <-s.signatureChan:
			verifierBatch = append(verifierBatch, sig)
-			if len(verifierBatch) >= verifierLimit {
+			if len(verifierBatch) >= s.cfg.batchVerifierLimit {
				verifyBatch(verifierBatch)
				verifierBatch = []*signatureVerifier{}
			}

@@ -67,6 +67,7 @@ func TestValidateWithBatchVerifier(t *testing.T) {
	ctx, cancel := context.WithCancel(t.Context())
	svc := &Service{
		ctx:           ctx,
+		cfg:           &config{batchVerifierLimit: verifierLimit},
		cancel:        cancel,
		signatureChan: make(chan *signatureVerifier, verifierLimit),
	}

beacon-chain/sync/data_columns_reconstruct.go (new file, 208 lines)
@@ -0,0 +1,208 @@
package sync

import (
	"context"
	"fmt"
	"slices"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
	broadcastMissingDataColumnsTimeIntoSlotMin = 1 * time.Second
	broadcastMissingDataColumnsSlack           = 2 * time.Second
)

// reconstructSaveBroadcastDataColumnSidecars reconstructs all data column
// sidecars, if reconstruction is both possible and needed. It then saves the
// missing sidecars into the store and, after a delay, broadcasts in the
// background the sidecars that were reconstructed but not seen via gossip.
func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
	ctx context.Context,
	slot primitives.Slot,
	proposerIndex primitives.ValidatorIndex,
	root [fieldparams.RootLength]byte,
) error {
	startTime := time.Now()

	// Get the columns we store.
	storedDataColumns := s.cfg.dataColumnStorage.Summary(root)
	storedColumnsCount := storedDataColumns.Count()
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	// Lock to prevent concurrent reconstructions.
	s.reconstructionLock.Lock()
	defer s.reconstructionLock.Unlock()

	// If reconstruction is not possible or if all columns are already stored, exit early.
	if storedColumnsCount < peerdas.MinimumColumnsCountToReconstruct() || storedColumnsCount == numberOfColumns {
		return nil
	}

	// Retrieve our local node info.
	nodeID := s.cfg.p2p.NodeID()
	custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
	localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		return errors.Wrap(err, "peer info")
	}

	// Load all the possible data column sidecars, to minimize reconstruction time.
	verifiedSidecars, err := s.cfg.dataColumnStorage.Get(root, nil)
	if err != nil {
		return errors.Wrap(err, "get data column sidecars")
	}

	// Reconstruct all the data column sidecars.
	reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(verifiedSidecars)
	if err != nil {
		return errors.Wrap(err, "reconstruct data column sidecars")
	}

	// Filter reconstructed sidecars to save.
	custodyColumns := localNodeInfo.CustodyColumns
	toSaveSidecars := make([]blocks.VerifiedRODataColumn, 0, len(custodyColumns))
	for _, sidecar := range reconstructedSidecars {
		if custodyColumns[sidecar.Index] {
			toSaveSidecars = append(toSaveSidecars, sidecar)
		}
	}

	// Save the data column sidecars in the database.
	// Note: We do not call `receiveDataColumn`, because it would make gossip
	// ignore incoming data columns before we have broadcast the reconstructed
	// ones ourselves.
	if err := s.cfg.dataColumnStorage.Save(toSaveSidecars); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}

	// Update reconstruction metrics.
	dataColumnReconstructionHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
	dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))

	// Schedule the broadcast.
	if err := s.scheduleMissingDataColumnSidecarsBroadcast(ctx, root, proposerIndex, slot); err != nil {
		return errors.Wrap(err, "schedule reconstructed data columns broadcast")
	}

	log.WithFields(logrus.Fields{
		"root":             fmt.Sprintf("%#x", root),
		"slot":             slot,
		"fromColumnsCount": storedColumnsCount,
	}).Debug("Data columns reconstructed and saved")

	return nil
}

// scheduleMissingDataColumnSidecarsBroadcast schedules the broadcast of
// missing (i.e. reconstructed but not seen via gossip) sidecars.
func (s *Service) scheduleMissingDataColumnSidecarsBroadcast(
	ctx context.Context,
	root [fieldparams.RootLength]byte,
	proposerIndex primitives.ValidatorIndex,
	slot primitives.Slot,
) error {
	log := log.WithFields(logrus.Fields{
		"root": fmt.Sprintf("%x", root),
		"slot": slot,
	})

	// Get the time corresponding to the start of the slot.
	genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
	slotStartTime, err := slots.ToTime(genesisTime, slot)
	if err != nil {
		return errors.Wrap(err, "to time")
	}

	// Compute the waiting time. This could be negative. In such a case, broadcast immediately.
	randFloat := s.reconstructionRandGen.Float64()
	timeIntoSlot := broadcastMissingDataColumnsTimeIntoSlotMin + time.Duration(float64(broadcastMissingDataColumnsSlack)*randFloat)
	broadcastTime := slotStartTime.Add(timeIntoSlot)
	waitingTime := time.Until(broadcastTime)
	time.AfterFunc(waitingTime, func() {
		// Return early if the context was canceled during the waiting time.
		if err := ctx.Err(); err != nil {
			return
		}

		if err := s.broadcastMissingDataColumnSidecars(slot, proposerIndex, root, timeIntoSlot); err != nil {
			log.WithError(err).Error("Failed to broadcast missing data column sidecars")
		}
	})

	return nil
}

func (s *Service) broadcastMissingDataColumnSidecars(
	slot primitives.Slot,
	proposerIndex primitives.ValidatorIndex,
	root [fieldparams.RootLength]byte,
	timeIntoSlot time.Duration,
) error {
	// Get the node ID.
	nodeID := s.cfg.p2p.NodeID()

	// Get the custody group count.
	custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()

	// Retrieve the local node info.
	localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		return errors.Wrap(err, "peerdas info")
	}

	// Compute the missing data columns (data columns we should custody but have not received via gossip).
	missingColumns := make([]uint64, 0, len(localNodeInfo.CustodyColumns))
	for column := range localNodeInfo.CustodyColumns {
		if !s.hasSeenDataColumnIndex(slot, proposerIndex, column) {
			missingColumns = append(missingColumns, column)
		}
	}

	// Return early if there are no missing data columns.
	if len(missingColumns) == 0 {
		return nil
	}

	// Load from the store the reconstructed data columns that were not received.
	verifiedRODataColumnSidecars, err := s.cfg.dataColumnStorage.Get(root, missingColumns)
	if err != nil {
		return errors.Wrap(err, "data column storage get")
	}

	broadcastedColumns := make([]uint64, 0, len(verifiedRODataColumnSidecars))
	for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
		broadcastedColumns = append(broadcastedColumns, verifiedRODataColumn.Index)
		// Compute the subnet for this column.
		subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)

		// Broadcast the missing data column.
		if err := s.cfg.p2p.BroadcastDataColumn(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
			log.WithError(err).Error("Broadcast data column")
		}

		// Now, we can set the data column as seen.
		s.setSeenDataColumnIndex(slot, proposerIndex, verifiedRODataColumn.Index)
	}

	if logrus.GetLevel() >= logrus.DebugLevel {
		// Sort for nice logging.
		slices.Sort(broadcastedColumns)
		slices.Sort(missingColumns)

		log.WithFields(logrus.Fields{
			"timeIntoSlot":   timeIntoSlot,
			"missingColumns": missingColumns,
			"broadcasted":    broadcastedColumns,
		}).Debug("Broadcasting reconstructed data columns not seen via gossip")
	}

	return nil
}
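A worked example of the broadcast delay computed in scheduleMissingDataColumnSidecarsBroadcast, using the two constants defined at the top of the file:

// randFloat is drawn from [0, 1), so the broadcast always lands between
// 1s and 3s into the slot; if that moment has already passed, the waiting
// time is negative and time.AfterFunc fires immediately.
// e.g. randFloat = 0.5:
//   timeIntoSlot = 1s + 0.5*2s = 2s
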
beacon-chain/sync/data_columns_reconstruct_test.go (new file, 191 lines)
@@ -0,0 +1,191 @@
package sync

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestReconstructDataColumns(t *testing.T) {
	const blobCount = 4
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	ctx := t.Context()

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
	require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))

	root, block := roBlock.Root(), roBlock.Block()
	slot, proposerIndex := block.Slot(), block.ProposerIndex()

	minimumCount := peerdas.MinimumColumnsCountToReconstruct()

	t.Run("not enough stored sidecars", func(t *testing.T) {
		storage := filesystem.NewEphemeralDataColumnStorage(t)
		err := storage.Save(verifiedRoDataColumns[:minimumCount-1])
		require.NoError(t, err)

		service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
		err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
		require.NoError(t, err)
	})

	t.Run("all stored sidecars", func(t *testing.T) {
		storage := filesystem.NewEphemeralDataColumnStorage(t)
		err := storage.Save(verifiedRoDataColumns)
		require.NoError(t, err)

		service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
		err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
		require.NoError(t, err)
	})

	t.Run("should reconstruct", func(t *testing.T) {
		// Here we set up a cgc of 8, which is not realistic, since there is no
		// real reason for a node to both:
		// - store enough data column sidecars to enable reconstruction, and
		// - custody too few columns to enable reconstruction.
		// However, for the needs of this test, this is perfectly fine.
		const cgc = 8

		storage := filesystem.NewEphemeralDataColumnStorage(t)
		minimumCount := peerdas.MinimumColumnsCountToReconstruct()
		err := storage.Save(verifiedRoDataColumns[:minimumCount])
		require.NoError(t, err)

		custodyInfo := &peerdas.CustodyInfo{}
		custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
		custodyInfo.ToAdvertiseGroupCount.Set(cgc)

		service := NewService(
			ctx,
			WithP2P(p2ptest.NewTestP2P(t)),
			WithDataColumnStorage(storage),
			WithCustodyInfo(custodyInfo),
			WithChainService(&mockChain.ChainService{}),
		)

		err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
		require.NoError(t, err)

		expected := make(map[uint64]bool, minimumCount+cgc)
		for i := range minimumCount {
			expected[i] = true
		}

		// The node should custody these indices.
		for _, i := range [...]uint64{1, 17, 19, 42, 75, 87, 102, 117} {
			expected[i] = true
		}

		summary := storage.Summary(root)
		actual := summary.Stored()

		require.Equal(t, len(expected), len(actual))
		for index := range expected {
			require.Equal(t, true, actual[index])
		}
	})
}

func TestBroadcastMissingDataColumnSidecars(t *testing.T) {
	const (
		cgc          = 8
		blobCount    = 4
		timeIntoSlot = 0
	)

	numberOfColumns := params.BeaconConfig().NumberOfColumns
	ctx := t.Context()

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
	require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))

	root, block := roBlock.Root(), roBlock.Block()
	slot, proposerIndex := block.Slot(), block.ProposerIndex()

	t.Run("no missing sidecars", func(t *testing.T) {
		custodyInfo := &peerdas.CustodyInfo{}
		custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
		custodyInfo.ToAdvertiseGroupCount.Set(cgc)

		service := NewService(
			ctx,
			WithP2P(p2ptest.NewTestP2P(t)),
			WithCustodyInfo(custodyInfo),
		)

		for _, index := range [...]uint64{1, 17, 19, 42, 75, 87, 102, 117} {
			key := computeCacheKey(slot, proposerIndex, index)
			service.seenDataColumnCache.Add(key, true)
		}

		err := service.broadcastMissingDataColumnSidecars(slot, proposerIndex, root, timeIntoSlot)
		require.NoError(t, err)
	})

	t.Run("some missing sidecars", func(t *testing.T) {
		custodyInfo := &peerdas.CustodyInfo{}
		custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
		custodyInfo.ToAdvertiseGroupCount.Set(cgc)

		toSave := make([]blocks.VerifiedRODataColumn, 0, 2)
		for _, index := range [...]uint64{42, 87} {
			toSave = append(toSave, verifiedRoDataColumns[index])
		}

		p2p := p2ptest.NewTestP2P(t)
		storage := filesystem.NewEphemeralDataColumnStorage(t)
		err := storage.Save(toSave)
		require.NoError(t, err)

		service := NewService(
			ctx,
			WithP2P(p2p),
			WithCustodyInfo(custodyInfo),
			WithDataColumnStorage(storage),
		)

		for _, index := range [...]uint64{1, 17, 19, 102, 117} { // 42, 75 and 87 are missing
			key := computeCacheKey(slot, proposerIndex, index)
			service.seenDataColumnCache.Add(key, true)
		}

		for _, index := range [...]uint64{42, 75, 87} {
			seen := service.hasSeenDataColumnIndex(slot, proposerIndex, index)
			require.Equal(t, false, seen)
		}

		require.Equal(t, false, p2p.BroadcastCalled.Load())

		err = service.broadcastMissingDataColumnSidecars(slot, proposerIndex, root, timeIntoSlot)
		require.NoError(t, err)

		seen := service.hasSeenDataColumnIndex(slot, proposerIndex, 75)
		require.Equal(t, false, seen)

		for _, index := range [...]uint64{42, 87} {
			seen := service.hasSeenDataColumnIndex(slot, proposerIndex, index)
			require.Equal(t, true, seen)
		}

		require.Equal(t, true, p2p.BroadcastCalled.Load())
	})
}
@@ -7,6 +7,7 @@ import (

	"github.com/OffchainLabs/prysm/v6/async/abool"
	mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
@@ -46,6 +47,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
				chain:       chainService,
				clock:       startup.NewClock(gt, vr),
				initialSync: &mockSync.Sync{IsSyncing: false},
+				custodyInfo: &peerdas.CustodyInfo{},
			},
			chainStarted: abool.New(),
			subHandler:   newSubTopicHandler(),
@@ -81,6 +83,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
				chain:       chainService,
				clock:       startup.NewClock(gt, vr),
				initialSync: &mockSync.Sync{IsSyncing: false},
+				custodyInfo: &peerdas.CustodyInfo{},
			},
			chainStarted: abool.New(),
			subHandler:   newSubTopicHandler(),
@@ -125,6 +128,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
				chain:       chainService,
				clock:       startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
				initialSync: &mockSync.Sync{IsSyncing: false},
+				custodyInfo: &peerdas.CustodyInfo{},
			},
			chainStarted: abool.New(),
			subHandler:   newSubTopicHandler(),
@@ -167,6 +171,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
				chain:       chainService,
				clock:       startup.NewClock(gt, vr),
				initialSync: &mockSync.Sync{IsSyncing: false},
+				custodyInfo: &peerdas.CustodyInfo{},
			},
			chainStarted: abool.New(),
			subHandler:   newSubTopicHandler(),
@@ -211,6 +216,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
				chain:       chainService,
				clock:       startup.NewClock(gt, vr),
				initialSync: &mockSync.Sync{IsSyncing: false},
+				custodyInfo: &peerdas.CustodyInfo{},
			},
			chainStarted: abool.New(),
			subHandler:   newSubTopicHandler(),
@@ -255,6 +261,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
				chain:       chainService,
				clock:       startup.NewClock(gt, vr),
				initialSync: &mockSync.Sync{IsSyncing: false},
+				custodyInfo: &peerdas.CustodyInfo{},
			},
			chainStarted: abool.New(),
			subHandler:   newSubTopicHandler(),
@@ -274,6 +281,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
			}
			assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
			assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
+			assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV3+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
		},
	},
}

@@ -89,6 +89,13 @@ var (
			Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
		},
	)
+	rpcDataColumnsByRangeResponseLatency = promauto.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:    "rpc_data_columns_by_range_response_latency_milliseconds",
+			Help:    "Captures total time to respond to rpc DataColumnsByRange requests in a milliseconds distribution",
+			Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
+		},
+	)
	arrivalBlockPropagationHistogram = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name: "block_arrival_latency_milliseconds",
@@ -203,6 +210,19 @@ var (
			Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
		},
	)
+
+	dataColumnReconstructionCounter = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "beacon_data_availability_reconstructed_columns_total",
+		Help: "Count the number of reconstructed data columns.",
+	})
+
+	dataColumnReconstructionHistogram = promauto.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:    "beacon_data_availability_reconstruction_time_milliseconds",
+			Help:    "Captures the time taken to reconstruct data columns.",
+			Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
+		},
+	)
)

func (s *Service) updateMetrics() {

@@ -7,6 +7,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -198,6 +199,14 @@ func WithAvailableBlocker(avb coverage.AvailableBlocker) Option {
	}
}

+// WithCustodyInfo sets the custody info used for data column custody decisions.
+func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
+	return func(s *Service) error {
+		s.cfg.custodyInfo = custodyInfo
+		return nil
+	}
+}
+
// WithSlasherEnabled configures the sync package to support slashing detection.
func WithSlasherEnabled(enabled bool) Option {
	return func(s *Service) error {
@@ -213,3 +222,11 @@ func WithLightClientStore(lcs *lightClient.Store) Option {
		return nil
	}
}
+
+// WithBatchVerifierLimit sets the maximum number of signatures to batch verify at once.
+func WithBatchVerifierLimit(limit int) Option {
+	return func(s *Service) error {
+		s.cfg.batchVerifierLimit = limit
+		return nil
+	}
+}

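Both options above follow the package's functional-option pattern. A short usage sketch, mirroring how the tests in this diff construct the service:

svc := NewService(
	ctx,
	WithCustodyInfo(&peerdas.CustodyInfo{}),
	WithBatchVerifierLimit(50), // flush signature batches once 50 are queued
)
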
@@ -40,6 +40,8 @@ import (
	logTest "github.com/sirupsen/logrus/hooks/test"
)

+var verifierLimit = 1000
+
func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
	hook := logTest.NewGlobal()
	db := dbtest.SetupDB(t)

@@ -47,13 +47,18 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
	allowedBlobsPerSecond := float64(flags.Get().BlobBatchLimit)
	allowedBlobsBurst := int64(flags.Get().BlobBatchLimitBurstFactor * flags.Get().BlobBatchLimit)

+	// Initialize data column limits.
+	allowedDataColumnsPerSecond := float64(flags.Get().DataColumnBatchLimit)
+	allowedDataColumnsBurst := int64(flags.Get().DataColumnBatchLimitBurstFactor * flags.Get().DataColumnBatchLimit)
+
	// Set topic map for all rpc topics.
	topicMap := make(map[string]*leakybucket.Collector, len(p2p.RPCTopicMappings))
	// Goodbye Message
	topicMap[addEncoding(p2p.RPCGoodByeTopicV1)] = leakybucket.NewCollector(1, 1, leakyBucketPeriod, false /* deleteEmptyBuckets */)
-	// MetadataV0 Message
+	// Metadata Message
	topicMap[addEncoding(p2p.RPCMetaDataTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
	topicMap[addEncoding(p2p.RPCMetaDataTopicV2)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
+	topicMap[addEncoding(p2p.RPCMetaDataTopicV3)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
	// Ping Message
	topicMap[addEncoding(p2p.RPCPingTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
	// Status Message
@@ -67,6 +72,9 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
	// for BlobSidecarsByRoot and BlobSidecarsByRange
	blobCollector := leakybucket.NewCollector(allowedBlobsPerSecond, allowedBlobsBurst, blockBucketPeriod, false)

+	// for DataColumnSidecarsByRoot and DataColumnSidecarsByRange
+	dataColumnSidecars := leakybucket.NewCollector(allowedDataColumnsPerSecond, allowedDataColumnsBurst, blockBucketPeriod, false)
+
	// BlocksByRoots requests
	topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
	topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2
@@ -86,6 +94,11 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
	topicMap[addEncoding(p2p.RPCLightClientOptimisticUpdateTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
	topicMap[addEncoding(p2p.RPCLightClientFinalityUpdateTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)

+	// DataColumnSidecarsByRootV1
+	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
+	// DataColumnSidecarsByRangeV1
+	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
+
	// General topic for all rpc requests.
	topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

@@ -17,7 +17,7 @@ import (

func TestNewRateLimiter(t *testing.T) {
	rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
-	assert.Equal(t, len(rlimiter.limiterMap), 16, "correct number of topics not registered")
+	assert.Equal(t, len(rlimiter.limiterMap), 19, "correct number of topics not registered")
}

func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

@@ -42,15 +42,15 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
	// Fulu: https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#messages
	if forkIndex >= version.Fulu {
		return map[string]rpcHandler{
			p2p.RPCStatusTopicV1: s.statusRPCHandler,
-			p2p.RPCGoodByeTopicV1:                  s.goodbyeRPCHandler,
-			p2p.RPCBlocksByRangeTopicV2:            s.beaconBlocksByRangeRPCHandler,
-			p2p.RPCBlocksByRootTopicV2:             s.beaconBlocksRootRPCHandler,
-			p2p.RPCPingTopicV1:                     s.pingHandler,
-			p2p.RPCMetaDataTopicV3:                 s.metaDataHandler, // Modified in Fulu
-			p2p.RPCBlobSidecarsByRootTopicV1:       s.blobSidecarByRootRPCHandler,
-			p2p.RPCBlobSidecarsByRangeTopicV1:      s.blobSidecarsByRangeRPCHandler,
-			p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
+			p2p.RPCGoodByeTopicV1:                   s.goodbyeRPCHandler,
+			p2p.RPCBlocksByRangeTopicV2:             s.beaconBlocksByRangeRPCHandler,
+			p2p.RPCBlocksByRootTopicV2:              s.beaconBlocksRootRPCHandler,
+			p2p.RPCPingTopicV1:                      s.pingHandler,
+			p2p.RPCMetaDataTopicV3:                  s.metaDataHandler, // Modified in Fulu
+			p2p.RPCBlobSidecarsByRootTopicV1:        s.blobSidecarByRootRPCHandler,
+			p2p.RPCBlobSidecarsByRangeTopicV1:       s.blobSidecarsByRangeRPCHandler,
+			p2p.RPCDataColumnSidecarsByRootTopicV1:  s.dataColumnSidecarByRootRPCHandler,   // Added in Fulu
+			p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
		}, nil
	}

beacon-chain/sync/rpc_data_column_sidecars_by_range.go (new file, 218 lines)
@@ -0,0 +1,218 @@
package sync

import (
	"context"
	"slices"
	"time"

	p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	libp2pcore "github.com/libp2p/go-libp2p/core"
	"github.com/pkg/errors"

	"github.com/sirupsen/logrus"
)

// We count a single request as a single rate limiting amount, regardless of the number of columns requested.
const rateLimitingAmount = 1

var notDataColumnsByRangeIdentifiersError = errors.New("not data columns by range identifiers")

// dataColumnSidecarsByRangeRPCHandler looks up the requested data columns from the database, starting at a given slot.
func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
	ctx, span := trace.StartSpan(ctx, "sync.DataColumnSidecarsByRangeHandler")
	defer span.End()

	// Check if the message type is the one expected.
	request, ok := msg.(*pb.DataColumnSidecarsByRangeRequest)
	if !ok {
		return notDataColumnsByRangeIdentifiersError
	}

	ctx, cancel := context.WithTimeout(ctx, respTimeout)
	defer cancel()

	SetRPCStreamDeadlines(stream)
	beaconConfig := params.BeaconConfig()
	maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars
	remotePeer := stream.Conn().RemotePeer()

	requestedColumns := request.Columns

	// Format log fields.
	var requestedColumnsLog interface{} = "all"
	if uint64(len(requestedColumns)) != beaconConfig.NumberOfColumns {
		requestedColumnsLog = requestedColumns
	}

	log := log.WithFields(logrus.Fields{
		"remotePeer":       remotePeer,
		"requestedColumns": requestedColumnsLog,
		"startSlot":        request.StartSlot,
		"count":            request.Count,
	})

	// Validate the request regarding rate limiting.
	if err := s.rateLimiter.validateRequest(stream, rateLimitingAmount); err != nil {
		return errors.Wrap(err, "rate limiter validate request")
	}

	// Validate the request regarding its parameters.
	rangeParameters, err := validateDataColumnsByRange(request, s.cfg.chain.CurrentSlot())
	if err != nil {
		s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
		s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
		tracing.AnnotateError(span, err)
		return errors.Wrap(err, "validate data columns by range")
	}
	if rangeParameters == nil {
		log.Debug("No data columns by range to serve")
		return nil
	}

	log.Debug("Serving data columns by range request")

	// Ticker to stagger out large requests.
	ticker := time.NewTicker(time.Second)

	batcher, err := newBlockRangeBatcher(*rangeParameters, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
	if err != nil {
		s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
		tracing.AnnotateError(span, err)
		return errors.Wrap(err, "new block range batcher")
	}

	// Derive the wanted columns for the request.
	wantedColumns := make([]uint64, len(request.Columns))
	copy(wantedColumns, request.Columns)

	// Sort the wanted columns.
	slices.Sort(wantedColumns)

	var batch blockBatch
	for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
		batchStart := time.Now()
		maxRequestDataColumnSidecars, err = s.streamDataColumnBatch(ctx, batch, maxRequestDataColumnSidecars, wantedColumns, stream)
		rpcDataColumnsByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
		if err != nil {
			return err
		}

		// Once the quota is reached, we're done serving the request.
		if maxRequestDataColumnSidecars == 0 {
			log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Debug("Reached quota for data column sidecars by range request")
			break
		}
	}

	if err := batch.error(); err != nil {
		log.WithError(err).Debug("error in DataColumnSidecarsByRange batch")

		// If we hit a rate limit, the error response has already been written, and the stream is already closed.
		if !errors.Is(err, p2ptypes.ErrRateLimited) {
			s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
		}

		tracing.AnnotateError(span, err)
		return err
	}

	closeStream(stream, log)
	return nil
}

func (s *Service) streamDataColumnBatch(ctx context.Context, batch blockBatch, quota uint64, wantedDataColumnIndices []uint64, stream libp2pcore.Stream) (uint64, error) {
	_, span := trace.StartSpan(ctx, "sync.streamDataColumnBatch")
	defer span.End()

	// Defensive check to guard against underflow.
	if quota == 0 {
		return 0, nil
	}

	// Loop over the blocks in the batch.
	for _, block := range batch.canonical() {
		// Get the block root.
		blockRoot := block.Root()

		// Retrieve the data column sidecars from the store.
		verifiedRODataColumns, err := s.cfg.dataColumnStorage.Get(blockRoot, wantedDataColumnIndices)
		if err != nil {
			s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
			return quota, errors.Wrapf(err, "get data column sidecars: block root %#x", blockRoot)
		}

		// Write the retrieved sidecars to the stream.
		for _, verifiedRODataColumn := range verifiedRODataColumns {
			sidecar := verifiedRODataColumn.DataColumnSidecar
			SetStreamWriteDeadline(stream, defaultWriteDuration)

			if err := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sidecar); err != nil {
				s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
				tracing.AnnotateError(span, err)
				return quota, errors.Wrap(err, "write data column sidecar chunk")
			}

			s.rateLimiter.add(stream, rateLimitingAmount)
			quota -= 1

			// Stop streaming results once the quota of writes for the request is consumed.
			if quota == 0 {
				return 0, nil
			}
		}
	}

	return quota, nil
}
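A short note on the quota flow above: the handler starts from MaxRequestDataColumnSidecars and threads the remaining quota through each batch, so the total number of chunks written per request stays bounded no matter how many blocks the range covers.

// Sketch of the accounting (values are illustrative, not the real config):
// quota = 100; batch 1 writes 40 sidecars -> quota = 60;
// batch 2 writes 60 -> quota = 0, streamDataColumnBatch returns early,
// and the caller logs "Reached quota..." and stops iterating batches.
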

func validateDataColumnsByRange(request *pb.DataColumnSidecarsByRangeRequest, currentSlot primitives.Slot) (*rangeParams, error) {
	startSlot, count := request.StartSlot, request.Count

	if count == 0 {
		return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "invalid request count parameter")
	}

	endSlot, err := request.StartSlot.SafeAdd(count - 1)
	if err != nil {
		return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "overflow start + count -1")
	}

	// Peers may overshoot the current slot when in initial sync,
	// so we don't want to penalize them by treating the request as an error.
	if startSlot > currentSlot {
		return nil, nil
	}

	// Compute the oldest slot we'll allow a peer to request, based on the current slot.
	minStartSlot, err := dataColumnsRPCMinValidSlot(currentSlot)
	if err != nil {
		return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "data columns RPC min valid slot")
	}

	// Return early if there is nothing to serve.
	if endSlot < minStartSlot {
		return nil, nil
	}

	// Do not serve sidecars for slots before the minimum valid slot or after the current slot.
	startSlot = max(startSlot, minStartSlot)
	endSlot = min(endSlot, currentSlot)

	sizeMinusOne, err := endSlot.SafeSub(uint64(startSlot))
	if err != nil {
		return nil, errors.Errorf("overflow end - start: %d - %d - should never happen", endSlot, startSlot)
	}

	size, err := sizeMinusOne.SafeAdd(1)
	if err != nil {
		return nil, errors.Wrap(p2ptypes.ErrInvalidRequest, "overflow end - start + 1")
	}

	rangeParameters := &rangeParams{start: startSlot, end: endSlot, size: uint64(size)}
	return rangeParameters, nil
}
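The clamping above admits a quick worked example, matching the "range exceeds limits" case in the test table below:

// Request: startSlot = 0, count = 10000, currentSlot = 400, and
// dataColumnsRPCMinValidSlot returns 320 for the test config below.
// endSlot   = min(0 + 10000 - 1, 400) = 400
// startSlot = max(0, 320)             = 320
// size      = 400 - 320 + 1           = 81
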
beacon-chain/sync/rpc_data_column_sidecars_by_range_test.go (new file, 301 lines)
@@ -0,0 +1,301 @@
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
t.Run("wrong message type", func(t *testing.T) {
|
||||
service := &Service{}
|
||||
err := service.dataColumnSidecarsByRangeRPCHandler(ctx, nil, nil)
|
||||
require.ErrorIs(t, err, notDataColumnsByRangeIdentifiersError)
|
||||
})
|
||||
|
||||
t.Run("invalid request", func(t *testing.T) {
|
||||
slot := primitives.Slot(400)
|
||||
|
||||
localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
chain: &chainMock.ChainService{
|
||||
Slot: &slot,
|
||||
},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
protocolID := protocol.ID(fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, _, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, responseCodeInvalidRequest, code)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
        stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
        require.NoError(t, err)

        msg := &pb.DataColumnSidecarsByRangeRequest{
            Count: 0, // Invalid count
        }
        require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)

        err = service.dataColumnSidecarsByRangeRPCHandler(ctx, msg, stream)
        require.NotNil(t, err)
        require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)

        if util.WaitTimeout(&wg, 1*time.Second) {
            t.Fatal("Did not receive stream within 1 sec")
        }
    })

    t.Run("nominal", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        beaconConfig := params.BeaconConfig()
        beaconConfig.FuluForkEpoch = 0
        params.OverrideBeaconConfig(beaconConfig)

        slot := primitives.Slot(400)

        params := []util.DataColumnParam{
            {Slot: 10, Index: 1}, {Slot: 10, Index: 2}, {Slot: 10, Index: 3},
            {Slot: 40, Index: 4}, {Slot: 40, Index: 6},
            {Slot: 45, Index: 7}, {Slot: 45, Index: 8}, {Slot: 45, Index: 9},
        }

        _, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, params)

        storage := filesystem.NewEphemeralDataColumnStorage(t)
        err := storage.Save(verifiedRODataColumns)
        require.NoError(t, err)

        localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
        protocolID := protocol.ID(fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1))

        roots := [][fieldparams.RootLength]byte{
            verifiedRODataColumns[0].BlockRoot(),
            verifiedRODataColumns[3].BlockRoot(),
            verifiedRODataColumns[5].BlockRoot(),
        }

        slots := []primitives.Slot{
            verifiedRODataColumns[0].Slot(),
            verifiedRODataColumns[3].Slot(),
            verifiedRODataColumns[5].Slot(),
        }

        beaconDB := testDB.SetupDB(t)
        roBlocks := make([]blocks.ROBlock, 0, len(roots))
        for i := range 3 {
            signedBeaconBlockPb := util.NewBeaconBlock()
            signedBeaconBlockPb.Block.Slot = slots[i]
            if i != 0 {
                signedBeaconBlockPb.Block.ParentRoot = roots[i-1][:]
            }

            signedBeaconBlock, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlockPb)
            require.NoError(t, err)

            // There is a discrepancy between the root of the beacon block and the RO data column root,
            // but for the sake of this test we don't care.
            roblock, err := consensusblocks.NewROBlockWithRoot(signedBeaconBlock, roots[i])
            require.NoError(t, err)

            roBlocks = append(roBlocks, roblock)
        }

        err = beaconDB.SaveROBlocks(ctx, roBlocks, false /*cache*/)
        require.NoError(t, err)

        service := &Service{
            cfg: &config{
                p2p:      localP2P,
                beaconDB: beaconDB,
                chain: &chainMock.ChainService{
                    Slot: &slot,
                },
                dataColumnStorage: storage,
            },
            rateLimiter: newRateLimiter(localP2P),
        }

        ctxMap := ContextByteVersions{
            [4]byte{245, 165, 253, 66}: version.Fulu,
        }

        root0 := verifiedRODataColumns[0].BlockRoot()
        root3 := verifiedRODataColumns[3].BlockRoot()
        root5 := verifiedRODataColumns[5].BlockRoot()

        var wg sync.WaitGroup
        wg.Add(1)

        remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
            defer wg.Done()

            sidecars := make([]*blocks.RODataColumn, 0, 5)

            for i := uint64(0); ; /* no stop condition */ i++ {
                sidecar, err := readChunkedDataColumnSidecar(stream, remoteP2P, ctxMap)
                if errors.Is(err, io.EOF) {
                    // End of stream.
                    break
                }

                require.NoError(t, err)
                sidecars = append(sidecars, sidecar)
            }

            require.Equal(t, 8, len(sidecars))
            require.Equal(t, root0, sidecars[0].BlockRoot())
            require.Equal(t, root0, sidecars[1].BlockRoot())
            require.Equal(t, root0, sidecars[2].BlockRoot())
            require.Equal(t, root3, sidecars[3].BlockRoot())
            require.Equal(t, root3, sidecars[4].BlockRoot())
            require.Equal(t, root5, sidecars[5].BlockRoot())
            require.Equal(t, root5, sidecars[6].BlockRoot())
            require.Equal(t, root5, sidecars[7].BlockRoot())

            require.Equal(t, uint64(1), sidecars[0].Index)
            require.Equal(t, uint64(2), sidecars[1].Index)
            require.Equal(t, uint64(3), sidecars[2].Index)
            require.Equal(t, uint64(4), sidecars[3].Index)
            require.Equal(t, uint64(6), sidecars[4].Index)
            require.Equal(t, uint64(7), sidecars[5].Index)
            require.Equal(t, uint64(8), sidecars[6].Index)
            require.Equal(t, uint64(9), sidecars[7].Index)
        })

        localP2P.Connect(remoteP2P)
        stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
        require.NoError(t, err)

        msg := &pb.DataColumnSidecarsByRangeRequest{
            StartSlot: 5,
            Count:     50,
            Columns:   []uint64{1, 2, 3, 4, 6, 7, 8, 9, 10},
        }

        err = service.dataColumnSidecarsByRangeRPCHandler(ctx, msg, stream)
        require.NoError(t, err)
    })
}

func TestValidateDataColumnsByRange(t *testing.T) {
    maxUint := primitives.Slot(math.MaxUint64)

    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.FuluForkEpoch = 10
    config.MinEpochsForDataColumnSidecarsRequest = 4096
    params.OverrideBeaconConfig(config)

    tests := []struct {
        name        string
        startSlot   primitives.Slot
        count       uint64
        currentSlot primitives.Slot
        expected    *rangeParams
        expectErr   bool
        errContains string
    }{
        {
            name:        "zero count returns error",
            count:       0,
            expectErr:   true,
            errContains: "invalid request count parameter",
        },
        {
            name:        "overflow in addition returns error",
            startSlot:   maxUint - 5,
            count:       10,
            currentSlot: maxUint,
            expectErr:   true,
            errContains: "overflow start + count -1",
        },
        {
            name:        "start greater than current returns nil",
            startSlot:   150,
            count:       10,
            currentSlot: 100,
            expected:    nil,
            expectErr:   false,
        },
        {
            name:        "end slot greater than min start slot returns nil",
            startSlot:   150,
            count:       100,
            currentSlot: 300,
            expected:    nil,
            expectErr:   false,
        },
        {
            name:        "range within limits",
            startSlot:   350,
            count:       10,
            currentSlot: 400,
            expected:    &rangeParams{start: 350, end: 359, size: 10},
            expectErr:   false,
        },
        {
            name:        "range exceeds limits",
            startSlot:   0,
            count:       10_000,
            currentSlot: 400,
            expected:    &rangeParams{start: 320, end: 400, size: 81},
            expectErr:   false,
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            request := &pb.DataColumnSidecarsByRangeRequest{
                StartSlot: tc.startSlot,
                Count:     tc.count,
            }

            rangeParameters, err := validateDataColumnsByRange(request, tc.currentSlot)
            if tc.expectErr {
                require.ErrorContains(t, err, tc.errContains)
                return
            }

            require.NoError(t, err)
            require.Equal(t, tc.expected, rangeParameters)
        })
    }
}
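
A minimal sketch of the clamping behavior the table above exercises. The names (rangeParams, dataColumnsRPCMinValidSlot) follow this diff, but the body is illustrative only and may differ from the real validateDataColumnsByRange:

// Sketch only: not the actual implementation.
func validateDataColumnsByRangeSketch(req *pb.DataColumnSidecarsByRangeRequest, current primitives.Slot) (*rangeParams, error) {
    if req.Count == 0 {
        return nil, errors.New("invalid request count parameter")
    }
    // end is inclusive: start + count - 1, guarded against overflow.
    end, err := req.StartSlot.SafeAdd(req.Count - 1)
    if err != nil {
        return nil, errors.New("overflow start + count -1")
    }
    if req.StartSlot > current {
        return nil, nil // Nothing to serve yet.
    }
    minSlot, err := dataColumnsRPCMinValidSlot(current)
    if err != nil {
        return nil, err
    }
    if end < minSlot {
        return nil, nil // The whole range is older than what we must serve.
    }
    start := max(req.StartSlot, minSlot) // Clamp to the retention window...
    end = min(end, current)              // ...and to the current head slot.
    return &rangeParams{start: start, end: end, size: uint64(end-start) + 1}, nil
}
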
@@ -12,6 +12,7 @@ import (
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/time/slots"
@@ -98,12 +99,15 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
    log.Debug("Serving data column sidecar by root request")

    count := 0
    for root, columns := range requestedColumnsByRoot {
    for _, ident := range requestedColumnIdents {
        if err := ctx.Err(); err != nil {
            closeStream(stream, log)
            return errors.Wrap(err, "context error")
        }

        root := bytesutil.ToBytes32(ident.BlockRoot)
        columns := ident.Columns

        // Throttle request processing to no more than batchSize/sec.
        for range columns {
            if ticker != nil && count != 0 && count%batchSize == 0 {
@@ -170,5 +174,10 @@ func dataColumnsRPCMinValidSlot(currentSlot primitives.Slot) (primitives.Slot, e
        minStartEpoch = currEpoch - minReqEpochs
    }

    return slots.EpochStart(minStartEpoch)
    epochStart, err := slots.EpochStart(minStartEpoch)
    if err != nil {
        return 0, errors.Wrapf(err, "epoch start for epoch %d", minStartEpoch)
    }

    return epochStart, nil
}
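
// Worked example for dataColumnsRPCMinValidSlot (assuming 32 slots per epoch,
// as in the tests below): with FuluForkEpoch = 10 and
// MinEpochsForDataColumnSidecarsRequest = 5, a node at epoch 20 serves from
// epoch max(10, 20-5) = 15, i.e. slot 480; at epoch 12 the subtraction would
// reach back past the fork, so it serves from the fork epoch itself, slot 320.
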
@@ -139,7 +139,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
    sidecars := make([]*blocks.RODataColumn, 0, 5)

    for i := uint64(0); ; /* no stop condition */ i++ {
        sidecar, err := readChunkedDataColumnSideCar(stream, remoteP2P, ctxMap)
        sidecar, err := readChunkedDataColumnSidecar(stream, remoteP2P, ctxMap)
        if errors.Is(err, io.EOF) {
            // End of stream.
            break
@@ -276,21 +276,28 @@ func TestDataColumnsRPCMinValidSlot(t *testing.T) {
            expected: primitives.Slot(math.MaxUint64),
        },
        {
            name:          "Current epoch equals fork epoch",
            name:          "Current epoch is before fulu fork epoch",
            fuluForkEpoch: 10,
            minReqEpochs:  5,
            currentSlot:   primitives.Slot(8 * slotsPerEpoch),
            expected:      primitives.Slot(10 * slotsPerEpoch),
        },
        {
            name:          "Current epoch is fulu fork epoch",
            fuluForkEpoch: 10,
            minReqEpochs:  5,
            currentSlot:   primitives.Slot(10 * slotsPerEpoch),
            expected:      primitives.Slot(10 * slotsPerEpoch),
        },
        {
            name:          "Current epoch less than minReqEpochs",
            name:          "Current epoch between fulu fork epoch and minReqEpochs",
            fuluForkEpoch: 10,
            minReqEpochs:  20,
            currentSlot:   primitives.Slot(15 * slotsPerEpoch),
            expected:      primitives.Slot(10 * slotsPerEpoch),
        },
        {
            name:          "Current epoch greater than minReqEpochs + fork epoch",
            name:          "Current epoch after fulu fork epoch + minReqEpochs",
            fuluForkEpoch: 10,
            minReqEpochs:  5,
            currentSlot:   primitives.Slot(20 * slotsPerEpoch),

@@ -17,7 +17,7 @@ import (
    "github.com/prysmaticlabs/go-bitfield"
)

// metaDataHandler reads the incoming metadata rpc request from the peer.
// metaDataHandler reads the incoming metadata RPC request from the peer.
func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
    SetRPCStreamDeadlines(stream)

@@ -70,7 +70,9 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2
    switch streamVersion {
    case p2p.SchemaVersionV1:
        switch metadataVersion {
        case version.Altair, version.Deneb:
        case version.Altair, version.Fulu:
            // If the stream version corresponds to Phase 0 but our metadata
            // corresponds to Altair or Fulu, convert our metadata to the Phase 0 one.
            metadata = wrapper.WrappedMetadataV0(
                &pb.MetaDataV0{
                    Attnets: metadata.AttnetsBitfield(),
@@ -81,13 +83,18 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2
    case p2p.SchemaVersionV2:
        switch metadataVersion {
        case version.Phase0:
            // If the stream version corresponds to Altair but our metadata
            // corresponds to Phase 0, convert our metadata to the Altair one,
            // and use a zeroed syncnets bitfield.
            metadata = wrapper.WrappedMetadataV1(
                &pb.MetaDataV1{
                    Attnets:   metadata.AttnetsBitfield(),
                    SeqNumber: metadata.SequenceNumber(),
                    Syncnets:  bitfield.Bitvector4{byte(0x00)},
                })
        case version.Deneb:
        case version.Fulu:
            // If the stream version corresponds to Altair but our metadata
            // corresponds to Fulu, convert our metadata to the Altair one.
            metadata = wrapper.WrappedMetadataV1(
                &pb.MetaDataV1{
                    Attnets: metadata.AttnetsBitfield(),
@@ -95,6 +102,32 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2
                    Syncnets:  metadata.SyncnetsBitfield(),
                })
        }

    case p2p.SchemaVersionV3:
        switch metadataVersion {
        case version.Phase0:
            // If the stream version corresponds to Fulu but our metadata
            // corresponds to Phase 0, convert our metadata to the Fulu one,
            // and use a zeroed syncnets bitfield and custody group count.
            metadata = wrapper.WrappedMetadataV2(
                &pb.MetaDataV2{
                    Attnets:           metadata.AttnetsBitfield(),
                    SeqNumber:         metadata.SequenceNumber(),
                    Syncnets:          bitfield.Bitvector4{byte(0x00)},
                    CustodyGroupCount: 0,
                })
        case version.Altair:
            // If the stream version corresponds to Fulu but our metadata
            // corresponds to Altair, convert our metadata to the Fulu one and
            // use a zeroed custody group count.
            metadata = wrapper.WrappedMetadataV2(
                &pb.MetaDataV2{
                    Attnets:           metadata.AttnetsBitfield(),
                    SeqNumber:         metadata.SequenceNumber(),
                    Syncnets:          metadata.SyncnetsBitfield(),
                    CustodyGroupCount: 0,
                })
        }
    }

    // Write the METADATA response into the stream.
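
// Sketch only: the conversion matrix implemented by the switch above.
// Rows are the negotiated stream schema version, columns our local metadata
// version; each cell is the wrapped type written back to the peer. Missing
// fields (syncnets, custody group count) are zeroed on upgrade and dropped
// on downgrade.
//
//            local V0 (Phase0)   local V1 (Altair)   local V2 (Fulu)
// stream V1  MetaDataV0          MetaDataV0          MetaDataV0
// stream V2  MetaDataV1 (+0s)    MetaDataV1          MetaDataV1
// stream V3  MetaDataV2 (+0s)    MetaDataV2 (+0s)    MetaDataV2
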
@@ -164,12 +197,14 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta
    }

    // Defensive check to ensure valid objects are being sent.
    topicVersion := ""
    var topicVersion string
    switch msg.Version() {
    case version.Phase0:
        topicVersion = p2p.SchemaVersionV1
    case version.Altair:
        topicVersion = p2p.SchemaVersionV2
    case version.Fulu:
        topicVersion = p2p.SchemaVersionV3
    }

    // Validate the version of the topic.

@@ -1,6 +1,7 @@
package sync

import (
    "context"
    "sync"
    "testing"
    "time"
@@ -15,6 +16,7 @@ import (
    leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
    "github.com/OffchainLabs/prysm/v6/encoding/ssz/equality"
    pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
@@ -22,6 +24,7 @@ import (
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/protocol"
    libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
    "github.com/prysmaticlabs/go-bitfield"
)

func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) {
@@ -76,158 +79,241 @@ func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) {
    }
}

func TestMetadataRPCHandler_SendsMetadata(t *testing.T) {
    p1 := p2ptest.NewTestP2P(t)
    p2 := p2ptest.NewTestP2P(t)
    p1.Connect(p2)
    assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
    bitfield := [8]byte{'A', 'B'}
    p2.LocalMetadata = wrapper.WrappedMetadataV0(&pb.MetaDataV0{
        SeqNumber: 2,
        Attnets:   bitfield[:],
    })

    // Set up a head state in the database with data we expect.
    chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    d := db.SetupDB(t)
    r := &Service{
func createService(peer p2p.P2P, chain *mock.ChainService) *Service {
    return &Service{
        cfg: &config{
            beaconDB: d,
            p2p:      p1,
            chain:    chain,
            clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
            p2p:   peer,
            chain: chain,
            clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
        },
        rateLimiter: newRateLimiter(p1),
    }

    r2 := &Service{
        cfg: &config{
            beaconDB: d,
            p2p:      p2,
            chain:    &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
        },
        rateLimiter: newRateLimiter(p2),
    }

    // Setup streams
    pcl := protocol.ID(p2p.RPCMetaDataTopicV1 + r.cfg.p2p.Encoding().ProtocolSuffix())
    topic := string(pcl)
    r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, time.Second, false)
    r2.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, time.Second, false)

    var wg sync.WaitGroup
    wg.Add(1)
    p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
        defer wg.Done()
        assert.NoError(t, r2.metaDataHandler(t.Context(), new(interface{}), stream))
    })

    md, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID())
    assert.NoError(t, err)

    if !equality.DeepEqual(md.InnerObject(), p2.LocalMetadata.InnerObject()) {
        t.Fatalf("MetadataV0 unequal, received %v but wanted %v", md, p2.LocalMetadata)
    }

    if util.WaitTimeout(&wg, 1*time.Second) {
        t.Fatal("Did not receive stream within 1 sec")
    }

    conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
    if len(conns) == 0 {
        t.Error("Peer is disconnected despite receiving a valid ping")
        rateLimiter: newRateLimiter(peer),
    }
}

func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) {
    const (
        requestTimeout    = 1 * time.Second
        seqNumber         = 2
        custodyGroupCount = 4
    )

    attnets := []byte{'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'}
    syncnets := []byte{0x4}

    // Configure the test beacon chain.
    params.SetupTestConfigCleanup(t)
    bCfg := params.BeaconConfig().Copy()
    bCfg.AltairForkEpoch = 5
    params.OverrideBeaconConfig(bCfg)
    beaconChainConfig := params.BeaconConfig().Copy()
    beaconChainConfig.AltairForkEpoch = 5
    beaconChainConfig.FuluForkEpoch = 15
    params.OverrideBeaconConfig(beaconChainConfig)
    params.BeaconConfig().InitializeForkSchedule()

    p1 := p2ptest.NewTestP2P(t)
    p2 := p2ptest.NewTestP2P(t)
    p1.Connect(p2)
    assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
    bitfield := [8]byte{'A', 'B'}
    p2.LocalMetadata = wrapper.WrappedMetadataV0(&pb.MetaDataV0{
        SeqNumber: 2,
        Attnets:   bitfield[:],
    })
    // Compute the number of seconds in an epoch.
    secondsPerEpoch := oneEpoch()

    // Set up a head state in the database with data we expect.
    d := db.SetupDB(t)
    chain := &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}}
    r := &Service{
        cfg: &config{
            beaconDB: d,
            p2p:      p1,
            chain:    chain,
            clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
    testCases := []struct {
        name                                             string
        topic                                            string
        epochsSinceGenesisPeer1, epochsSinceGenesisPeer2 int
        metadataPeer2, expected                          metadata.Metadata
    }{
        {
            name:                    "Phase0-Phase0",
            topic:                   p2p.RPCMetaDataTopicV1,
            epochsSinceGenesisPeer1: 0,
            epochsSinceGenesisPeer2: 0,
            metadataPeer2: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
                SeqNumber: seqNumber,
                Attnets:   attnets,
            }),
            expected: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
                SeqNumber: seqNumber,
                Attnets:   attnets,
            }),
        },
        rateLimiter: newRateLimiter(p1),
    }

    chain2 := &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}}
    r2 := &Service{
        cfg: &config{
            beaconDB: d,
            p2p:      p2,
            chain:    chain2,
            clock:    startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
        {
            name:                    "Phase0-Altair",
            topic:                   p2p.RPCMetaDataTopicV1,
            epochsSinceGenesisPeer1: 0,
            epochsSinceGenesisPeer2: 5,
            metadataPeer2: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
                SeqNumber: seqNumber,
                Attnets:   attnets,
                Syncnets:  syncnets,
            }),
            expected: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
                SeqNumber: seqNumber,
                Attnets:   attnets,
            }),
        },
        {
            name:                    "Phase0-Fulu",
            topic:                   p2p.RPCMetaDataTopicV1,
            epochsSinceGenesisPeer1: 0,
            epochsSinceGenesisPeer2: 15,
            metadataPeer2: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
                SeqNumber:         seqNumber,
                Attnets:           attnets,
                Syncnets:          syncnets,
                CustodyGroupCount: custodyGroupCount,
            }),
            expected: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
                SeqNumber: seqNumber,
                Attnets:   attnets,
            }),
        },
        {
            name:                    "Altair-Phase0",
            topic:                   p2p.RPCMetaDataTopicV2,
            epochsSinceGenesisPeer1: 5,
            epochsSinceGenesisPeer2: 0,
            metadataPeer2: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
                SeqNumber: seqNumber,
                Attnets:   attnets,
            }),
            expected: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
                SeqNumber: seqNumber,
                Attnets:   attnets,
                Syncnets:  bitfield.Bitvector4{byte(0x00)},
            }),
        },
        {
            name:                    "Altair-Altair",
            topic:                   p2p.RPCMetaDataTopicV2,
            epochsSinceGenesisPeer1: 5,
            epochsSinceGenesisPeer2: 5,
            metadataPeer2: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
                SeqNumber: seqNumber,
                Attnets:   attnets,
                Syncnets:  syncnets,
            }),
            expected: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
                SeqNumber: seqNumber,
                Attnets:   attnets,
                Syncnets:  syncnets,
            }),
        },
        {
            name:                    "Altair-Fulu",
            topic:                   p2p.RPCMetaDataTopicV2,
            epochsSinceGenesisPeer1: 5,
            epochsSinceGenesisPeer2: 15,
            metadataPeer2: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
                SeqNumber:         seqNumber,
                Attnets:           attnets,
                Syncnets:          syncnets,
                CustodyGroupCount: custodyGroupCount,
            }),
            expected: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
                SeqNumber: seqNumber,
                Attnets:   attnets,
                Syncnets:  syncnets,
            }),
        },
        {
            name:                    "Fulu-Phase0",
            topic:                   p2p.RPCMetaDataTopicV3,
            epochsSinceGenesisPeer1: 15,
            epochsSinceGenesisPeer2: 0,
            metadataPeer2: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
                SeqNumber: seqNumber,
                Attnets:   attnets,
            }),
            expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
                SeqNumber:         seqNumber,
                Attnets:           attnets,
                Syncnets:          bitfield.Bitvector4{byte(0x00)},
                CustodyGroupCount: 0,
            }),
        },
        {
            name:                    "Fulu-Altair",
            topic:                   p2p.RPCMetaDataTopicV3,
            epochsSinceGenesisPeer1: 15,
            epochsSinceGenesisPeer2: 5,
            metadataPeer2: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
                SeqNumber: seqNumber,
                Attnets:   attnets,
                Syncnets:  syncnets,
            }),
            expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
                SeqNumber:         seqNumber,
                Attnets:           attnets,
                Syncnets:          syncnets,
                CustodyGroupCount: 0,
            }),
        },
        {
            name:                    "Fulu-Fulu",
            topic:                   p2p.RPCMetaDataTopicV3,
            epochsSinceGenesisPeer1: 15,
            epochsSinceGenesisPeer2: 15,
            metadataPeer2: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
                SeqNumber:         seqNumber,
                Attnets:           attnets,
                Syncnets:          syncnets,
                CustodyGroupCount: custodyGroupCount,
            }),
            expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
                SeqNumber:         seqNumber,
                Attnets:           attnets,
                Syncnets:          syncnets,
                CustodyGroupCount: custodyGroupCount,
            }),
        },
        rateLimiter: newRateLimiter(p2),
    }

    // Setup streams
    pcl := protocol.ID(p2p.RPCMetaDataTopicV2 + r.cfg.p2p.Encoding().ProtocolSuffix())
    topic := string(pcl)
    r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(2, 2, time.Second, false)
    r2.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(2, 2, time.Second, false)
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            var wg sync.WaitGroup

    var wg sync.WaitGroup
    wg.Add(1)
    p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
        defer wg.Done()
        err := r2.metaDataHandler(t.Context(), new(interface{}), stream)
        assert.NoError(t, err)
    })
            ctx := context.Background()

    _, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID())
    assert.NoError(t, err)
            // Setup and connect peers.
            peer1, peer2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
            peer1.Connect(peer2)

    if util.WaitTimeout(&wg, 1*time.Second) {
        t.Fatal("Did not receive stream within 1 sec")
    }
            // Ensure the peers are connected.
            peersCount := len(peer1.BHost.Network().Peers())
            require.Equal(t, 1, peersCount, "Expected peers to be connected")

    // Fix up peer with the correct metadata.
    p2.LocalMetadata = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
        SeqNumber: 2,
        Attnets:   bitfield[:],
        Syncnets:  []byte{0x0},
    })
            // Setup sync services.
            genesisPeer1 := time.Now().Add(-time.Duration(tc.epochsSinceGenesisPeer1) * secondsPerEpoch)
            genesisPeer2 := time.Now().Add(-time.Duration(tc.epochsSinceGenesisPeer2) * secondsPerEpoch)

    wg.Add(1)
    p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
        defer wg.Done()
        assert.NoError(t, r2.metaDataHandler(t.Context(), new(interface{}), stream))
    })
            chainPeer1 := &mock.ChainService{Genesis: genesisPeer1, ValidatorsRoot: [32]byte{}}
            chainPeer2 := &mock.ChainService{Genesis: genesisPeer2, ValidatorsRoot: [32]byte{}}

    md, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID())
    assert.NoError(t, err)
            servicePeer1 := createService(peer1, chainPeer1)
            servicePeer2 := createService(peer2, chainPeer2)

    if !equality.DeepEqual(md.InnerObject(), p2.LocalMetadata.InnerObject()) {
        t.Fatalf("MetadataV1 unequal, received %v but wanted %v", md, p2.LocalMetadata)
    }
            // Define the behavior of peer2 when receiving a METADATA request.
            protocolSuffix := servicePeer2.cfg.p2p.Encoding().ProtocolSuffix()
            protocolID := protocol.ID(tc.topic + protocolSuffix)
            peer2.LocalMetadata = tc.metadataPeer2

    if util.WaitTimeout(&wg, 1*time.Second) {
        t.Fatal("Did not receive stream within 1 sec")
    }
            wg.Add(1)
            peer2.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
                defer wg.Done()
                err := servicePeer2.metaDataHandler(ctx, new(interface{}), stream)
                require.NoError(t, err)
            })

    conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
    if len(conns) == 0 {
        t.Error("Peer is disconnected despite receiving a valid ping")
            // Send a METADATA request from peer1 to peer2.
            actual, err := servicePeer1.sendMetaDataRequest(ctx, peer2.BHost.ID())
            require.NoError(t, err)

            // Wait until the METADATA request is received by peer2 or timeout.
            timeOutReached := util.WaitTimeout(&wg, requestTimeout)
            require.Equal(t, false, timeOutReached, "Did not receive METADATA request within timeout")

            // Compare the received METADATA object with the expected METADATA object.
            require.DeepSSZEqual(t, tc.expected.InnerObject(), actual.InnerObject(), "Metadata unequal")

            // Ensure the peers are still connected.
            peersCount = len(peer1.BHost.Network().Peers())
            assert.Equal(t, 1, peersCount, "Expected peers to be connected")
        })
    }
}

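// Note: the table above exercises every (local fork, peer fork) pairing; the
// RPCMetaDataTopicV1/V2/V3 constants correspond to the SchemaVersionV1/V2/V3
// branches in metaDataHandler's conversion switch shown earlier in this diff.
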
@@ -4,12 +4,14 @@ import (
    "context"
    "fmt"
    "io"
    "slices"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
    p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -30,18 +32,24 @@ var errBlobUnmarshal = errors.New("Could not unmarshal chunk-encoded blob")

// Any error from the following declaration block should result in peer downscoring.
var (
    // ErrInvalidFetchedData is used to signal that an error occurred which should result in peer downscoring.
    ErrInvalidFetchedData = errors.New("invalid data returned from peer")
    errBlobIndexOutOfBounds = errors.Wrap(verification.ErrBlobInvalid, "blob index out of range")
    errMaxRequestBlobSidecarsExceeded = errors.Wrap(verification.ErrBlobInvalid, "peer exceeded req blob chunk tx limit")
    errChunkResponseSlotNotAsc = errors.Wrap(verification.ErrBlobInvalid, "blob slot not higher than previous block root")
    errChunkResponseIndexNotAsc = errors.Wrap(verification.ErrBlobInvalid, "blob indices for a block must start at 0 and increase by 1")
    errUnrequested = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar in response that was not requested")
    errBlobResponseOutOfBounds = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
    errChunkResponseBlockMismatch = errors.Wrap(verification.ErrBlobInvalid, "blob block details do not match")
    errChunkResponseParentMismatch = errors.Wrap(verification.ErrBlobInvalid, "parent root for response element doesn't match previous element root")
    errDataColumnChunkedReadFailure = errors.New("failed to read stream of chunk-encoded data columns")
    ErrInvalidFetchedData                    = errors.New("invalid data returned from peer")
    errBlobIndexOutOfBounds                  = errors.Wrap(verification.ErrBlobInvalid, "blob index out of range")
    errMaxRequestBlobSidecarsExceeded        = errors.Wrap(verification.ErrBlobInvalid, "peer exceeded req blob chunk tx limit")
    errChunkResponseSlotNotAsc               = errors.Wrap(verification.ErrBlobInvalid, "blob slot not higher than previous block root")
    errChunkResponseIndexNotAsc              = errors.Wrap(verification.ErrBlobInvalid, "blob indices for a block must start at 0 and increase by 1")
    errUnrequested                           = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar in response that was not requested")
    errBlobResponseOutOfBounds               = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
    errChunkResponseBlockMismatch            = errors.Wrap(verification.ErrBlobInvalid, "blob block details do not match")
    errChunkResponseParentMismatch           = errors.Wrap(verification.ErrBlobInvalid, "parent root for response element doesn't match previous element root")
    errDataColumnChunkedReadFailure          = errors.New("failed to read stream of chunk-encoded data columns")
    errMaxRequestDataColumnSidecarsExceeded  = errors.New("count of requested data column sidecars exceeds MAX_REQUEST_DATA_COLUMN_SIDECARS")
    errMaxResponseDataColumnSidecarsExceeded = errors.New("peer returned more data column sidecars than requested")
)

// ------
// Blocks
// ------

// BeaconBlockProcessor defines a block processing function, which allows to start utilizing
// blocks even before all blocks are ready.
type BeaconBlockProcessor func(block interfaces.ReadOnlySignedBeaconBlock) error
@@ -155,6 +163,14 @@ func SendBeaconBlocksByRootRequest(
    return blocks, nil
}

// -------------
// Blob sidecars
// -------------

// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob sidecar
// that was received from a peer in response to an rpc request.
type BlobResponseValidation func(blocks.ROBlob) error

func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *ethpb.BlobSidecarsByRangeRequest, bvs ...BlobResponseValidation) ([]blocks.ROBlob, error) {
    topic, err := p2p.TopicFromMessage(p2p.BlobSidecarsByRangeName, slots.ToEpoch(tor.CurrentSlot()))
    if err != nil {
@@ -216,10 +232,6 @@ func SendBlobSidecarByRoot(
    return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), max)
}

// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob
// that was received from a peer in response to an rpc request.
type BlobResponseValidation func(blocks.ROBlob) error

func composeBlobValidations(vf ...BlobResponseValidation) BlobResponseValidation {
    return func(blob blocks.ROBlob) error {
        for i := range vf {
@@ -385,10 +397,258 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
    return rob, nil
}

func readChunkedDataColumnSideCar(
// --------------------
// Data column sidecars
// --------------------

// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range
// and returns the fetched data column sidecars.
func SendDataColumnSidecarsByRangeRequest(
    ctx context.Context,
    tor blockchain.TemporalOracle,
    p2pApi p2p.P2P,
    pid peer.ID,
    ctxMap ContextByteVersions,
    request *ethpb.DataColumnSidecarsByRangeRequest,
) ([]blocks.RODataColumn, error) {
    // Return early if nothing to request.
    if request == nil || request.Count == 0 || len(request.Columns) == 0 {
        return nil, nil
    }

    beaconConfig := params.BeaconConfig()
    numberOfColumns := beaconConfig.NumberOfColumns
    maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars

    // Check if we do not request too many sidecars.
    columnsCount := uint64(len(request.Columns))
    totalCount := request.Count * columnsCount
    if totalCount > maxRequestDataColumnSidecars {
        return nil, errors.Wrapf(errMaxRequestDataColumnSidecarsExceeded, "requestedCount=%d, allowedCount=%d", totalCount, maxRequestDataColumnSidecars)
    }

    // Build the topic.
    currentSlot := tor.CurrentSlot()
    currentEpoch := slots.ToEpoch(currentSlot)
    topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, currentEpoch)
    if err != nil {
        return nil, errors.Wrap(err, "topic from message")
    }

    // Build the logs.
    var columnsLog interface{} = "all"
    if columnsCount < numberOfColumns {
        columns := request.Columns
        slices.Sort(columns)
        columnsLog = columns
    }

    log := log.WithFields(logrus.Fields{
        "peer":       pid,
        "topic":      topic,
        "startSlot":  request.StartSlot,
        "count":      request.Count,
        "columns":    columnsLog,
        "totalCount": totalCount,
    })

    // Send the request.
    stream, err := p2pApi.Send(ctx, request, topic, pid)
    if err != nil {
        return nil, errors.Wrap(err, "p2p send")
    }
    defer closeStream(stream, log)

    // Read the data column sidecars from the stream.
    roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
    for range totalCount {
        // Avoid reading extra chunks if the context is done.
        if err := ctx.Err(); err != nil {
            return nil, err
        }

        validatorSlotWithinBounds, err := isSidecarSlotWithinBounds(request)
        if err != nil {
            return nil, errors.Wrap(err, "is sidecar slot within bounds")
        }

        roDataColumn, err := readChunkedDataColumnSidecar(
            stream, p2pApi, ctxMap,
            validatorSlotWithinBounds,
            isSidecarIndexRequested(request),
        )
        if errors.Is(err, io.EOF) {
            return roDataColumns, nil
        }
        if err != nil {
            return nil, errors.Wrap(err, "read chunked data column sidecar")
        }

        if roDataColumn == nil {
            return nil, errors.New("nil data column sidecar, should never happen")
        }

        roDataColumns = append(roDataColumns, *roDataColumn)
    }

    // All requested sidecars were delivered by the peer. Expecting EOF.
    if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
        return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
    }

    return roDataColumns, nil
}

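// Sketch only: a typical call site for the request helper above, under the
// assumption of a Fulu-capable clock, host, and context map; the request
// values are placeholders.
func exampleFetchColumns(ctx context.Context, clock blockchain.TemporalOracle, host p2p.P2P, pid peer.ID, ctxMap ContextByteVersions) ([]blocks.RODataColumn, error) {
    req := &ethpb.DataColumnSidecarsByRangeRequest{
        StartSlot: 100,
        Count:     32,
        Columns:   []uint64{0, 1, 2, 3},
    }
    // Errors wrapping errMaxRequestDataColumnSidecarsExceeded or
    // errMaxResponseDataColumnSidecarsExceeded indicate a misbehaving peer
    // and should feed into downscoring.
    return SendDataColumnSidecarsByRangeRequest(ctx, clock, host, pid, ctxMap, req)
}
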
// isSidecarSlotWithinBounds verifies that the slot of the data column sidecar is within the bounds of the request.
func isSidecarSlotWithinBounds(request *ethpb.DataColumnSidecarsByRangeRequest) (DataColumnResponseValidation, error) {
    // endSlot is exclusive (while request.StartSlot is inclusive).
    endSlot, err := request.StartSlot.SafeAdd(request.Count)
    if err != nil {
        return nil, errors.Wrap(err, "calculate end slot")
    }

    validator := func(sidecar blocks.RODataColumn) error {
        slot := sidecar.Slot()

        if !(request.StartSlot <= slot && slot < endSlot) {
            return errors.Errorf("data column sidecar slot %d out of range [%d, %d[", slot, request.StartSlot, endSlot)
        }

        return nil
    }

    return validator, nil
}

// isSidecarIndexRequested verifies that the index of the data column sidecar is found in the requested indices.
func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) DataColumnResponseValidation {
    requestedIndices := make(map[uint64]bool)
    for _, col := range request.Columns {
        requestedIndices[col] = true
    }

    return func(sidecar blocks.RODataColumn) error {
        columnIndex := sidecar.Index
        if !requestedIndices[columnIndex] {
            return errors.Errorf("data column sidecar index %d not found in requested indices", columnIndex)
        }

        return nil
    }
}

// SendDataColumnSidecarsByRootRequest sends a request for data column sidecars by root
// and returns the fetched data column sidecars.
func SendDataColumnSidecarsByRootRequest(
    ctx context.Context,
    tor blockchain.TemporalOracle,
    p2pApi p2p.P2P,
    pid peer.ID,
    ctxMap ContextByteVersions,
    request p2ptypes.DataColumnsByRootIdentifiers,
) ([]blocks.RODataColumn, error) {
    // Return early if the request is nil.
    if request == nil {
        return nil, nil
    }

    // Compute how many sidecars are requested.
    count := uint64(0)
    for _, identifier := range request {
        count += uint64(len(identifier.Columns))
    }

    // Return early if nothing to request.
    if count == 0 {
        return nil, nil
    }

    // Verify that the request count is within the maximum allowed.
    maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
    if count > maxRequestDataColumnSidecars {
        return nil, errors.Wrapf(errMaxRequestDataColumnSidecarsExceeded, "current: %d, max: %d", count, maxRequestDataColumnSidecars)
    }

    // Get the topic for the request.
    topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
    if err != nil {
        return nil, errors.Wrap(err, "topic from message")
    }

    // Send the request to the peer.
    stream, err := p2pApi.Send(ctx, request, topic, pid)
    if err != nil {
        return nil, errors.Wrap(err, "p2p api send")
    }
    defer closeStream(stream, log)

    // Read the data column sidecars from the stream.
    roDataColumns := make([]blocks.RODataColumn, 0, count)
    for range count {
        roDataColumn, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap, isSidecarIndexRootRequested(request))
        if errors.Is(err, io.EOF) {
            return roDataColumns, nil
        }
        if err != nil {
            return nil, errors.Wrap(err, "read chunked data column sidecar")
        }

        if roDataColumn == nil {
            return nil, errors.New("nil data column sidecar, should never happen")
        }

        roDataColumns = append(roDataColumns, *roDataColumn)
    }

    // All requested sidecars were delivered by the peer. Expecting EOF.
    if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
        return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
    }

    return roDataColumns, nil
}

func isSidecarIndexRootRequested(request p2ptypes.DataColumnsByRootIdentifiers) DataColumnResponseValidation {
    columnsIndexFromRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)

    for _, sidecar := range request {
        blockRoot := bytesutil.ToBytes32(sidecar.BlockRoot)
        if columnsIndexFromRoot[blockRoot] == nil {
            columnsIndexFromRoot[blockRoot] = make(map[uint64]bool)
        }

        for _, column := range sidecar.Columns {
            columnsIndexFromRoot[blockRoot][column] = true
        }
    }

    return func(sidecar blocks.RODataColumn) error {
        root, index := sidecar.BlockRoot(), sidecar.Index
        indices, ok := columnsIndexFromRoot[root]

        if !ok {
            return errors.Errorf("root #%x returned by peer but not requested", root)
        }

        if !indices[index] {
            return errors.Errorf("index %d for root #%x returned by peer but not requested", index, root)
        }

        return nil
    }
}

// DataColumnResponseValidation represents a function that can validate aspects of a single unmarshaled data column sidecar
// that was received from a peer in response to an rpc request.
type DataColumnResponseValidation func(column blocks.RODataColumn) error

func readChunkedDataColumnSidecar(
    stream network.Stream,
    p2pApi p2p.P2P,
    ctxMap ContextByteVersions,
    validationFunctions ...DataColumnResponseValidation,
) (*blocks.RODataColumn, error) {
    // Read the status code from the stream.
    statusCode, errMessage, err := ReadStatusCode(stream, p2pApi.Encoding())
@@ -432,5 +692,12 @@ func readChunkedDataColumnSideCar(
        return nil, errors.Wrap(err, "new read only data column")
    }

    // Run validation functions.
    for _, validationFunction := range validationFunctions {
        if err := validationFunction(roDataColumn); err != nil {
            return nil, errors.Wrap(err, "validation function")
        }
    }

    return &roDataColumn, nil
}

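// Sketch only: how the variadic validation hooks above compose in a read
// loop. The validator names mirror those defined earlier in this diff; the
// loop body is illustrative, not the library's own code.
func exampleReadLoop(stream network.Stream, p2pApi p2p.P2P, ctxMap ContextByteVersions, req *ethpb.DataColumnSidecarsByRangeRequest) error {
    withinBounds, err := isSidecarSlotWithinBounds(req)
    if err != nil {
        return err
    }
    for {
        // Each chunk is rejected as soon as any validator fails.
        sc, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap, withinBounds, isSidecarIndexRequested(req))
        if errors.Is(err, io.EOF) {
            return nil // Peer closed the stream cleanly.
        }
        if err != nil {
            return err
        }
        _ = sc // Process the validated sidecar here.
    }
}
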
@@ -5,12 +5,15 @@ import (
    "errors"
    "fmt"
    "io"
    "sync"
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -20,6 +23,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
@@ -882,3 +886,745 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
func TestErrInvalidFetchedDataDistinction(t *testing.T) {
    require.Equal(t, false, errors.Is(ErrInvalidFetchedData, verification.ErrBlobInvalid))
}

func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
    nilTestCases := []struct {
        name    string
        request *ethpb.DataColumnSidecarsByRangeRequest
    }{
        {
            name:    "nil request",
            request: nil,
        },
        {
            name:    "count is 0",
            request: &ethpb.DataColumnSidecarsByRangeRequest{},
        },
        {
            name:    "columns is nil",
            request: &ethpb.DataColumnSidecarsByRangeRequest{Count: 1},
        },
    }

    for _, tc := range nilTestCases {
        t.Run(tc.name, func(t *testing.T) {
            actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
            require.NoError(t, err)
            require.IsNil(t, actual)
        })
    }

    t.Run("too many columns in request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        beaconConfig := params.BeaconConfig()
        beaconConfig.MaxRequestDataColumnSidecars = 0
        params.OverrideBeaconConfig(beaconConfig)

        request := &ethpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
        _, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
        require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
    })

    type slotIndex struct {
        Slot  primitives.Slot
        Index uint64
    }

    createSidecar := func(slotIndex slotIndex) *ethpb.DataColumnSidecar {
        const count = 4
        kzgCommitmentsInclusionProof := make([][]byte, 0, count)
        for range count {
            kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
        }

        return &ethpb.DataColumnSidecar{
            Index: slotIndex.Index,
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    Slot:       slotIndex.Slot,
                    ParentRoot: make([]byte, fieldparams.RootLength),
                    StateRoot:  make([]byte, fieldparams.RootLength),
                    BodyRoot:   make([]byte, fieldparams.RootLength),
                },
                Signature: make([]byte, fieldparams.BLSSignatureLength),
            },
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }
    }

    testCases := []struct {
        name          string
        slotIndices   []slotIndex
        expectedError error
    }{
        {
            name: "too many responses",
            slotIndices: []slotIndex{
                {Slot: 0, Index: 1},
                {Slot: 0, Index: 2},
                {Slot: 0, Index: 3},
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 0, Index: 3}, // Duplicate
            },
            expectedError: errMaxResponseDataColumnSidecarsExceeded,
        },
        {
            name: "perfect match",
            slotIndices: []slotIndex{
                {Slot: 0, Index: 1},
                {Slot: 0, Index: 2},
                {Slot: 0, Index: 3},
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
            },
        },
        {
            name: "fewer responses than the maximum possible",
            slotIndices: []slotIndex{
                {Slot: 0, Index: 1},
                {Slot: 0, Index: 2},
                {Slot: 0, Index: 3},
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
            clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

            p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
            p1.Connect(p2)

            expected := make([]*ethpb.DataColumnSidecar, 0, len(tc.slotIndices))
            for _, slotIndex := range tc.slotIndices {
                sidecar := createSidecar(slotIndex)
                expected = append(expected, sidecar)
            }

            requestSent := &ethpb.DataColumnSidecarsByRangeRequest{
                StartSlot: 0,
                Count:     2,
                Columns:   []uint64{1, 3, 2},
            }

            var wg sync.WaitGroup
            wg.Add(1)

            p2.SetStreamHandler(protocol, func(stream network.Stream) {
                wg.Done()

                requestReceived := new(ethpb.DataColumnSidecarsByRangeRequest)
                err := p2.Encoding().DecodeWithMaxLength(stream, requestReceived)
                assert.NoError(t, err)
                assert.DeepSSZEqual(t, requestSent, requestReceived)

                for _, sidecar := range expected {
                    err := WriteDataColumnSidecarChunk(stream, clock, p2.Encoding(), sidecar)
                    assert.NoError(t, err)
                }

                err = stream.CloseWrite()
                assert.NoError(t, err)
            })

            ctx := t.Context()
            ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

            actual, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p1, p2.PeerID(), ctxMap, requestSent)
            if tc.expectedError != nil {
                require.ErrorContains(t, tc.expectedError.Error(), err)
                if util.WaitTimeout(&wg, time.Second) {
                    t.Fatal("Did not receive stream within 1 sec")
                }

                return
            }

            require.Equal(t, len(expected), len(actual))
            for i := range expected {
                require.DeepSSZEqual(t, expected[i], actual[i].DataColumnSidecar)
            }
        })
    }
}

func TestIsSidecarSlotWithinBounds(t *testing.T) {
    request := &ethpb.DataColumnSidecarsByRangeRequest{
        StartSlot: 10,
        Count:     10,
    }

    validator, err := isSidecarSlotWithinBounds(request)
    require.NoError(t, err)

    testCases := []struct {
        name            string
        slot            primitives.Slot
        isErrorExpected bool
    }{
        {
            name:            "too soon",
            slot:            9,
            isErrorExpected: true,
        },
        {
            name:            "too late",
            slot:            20,
            isErrorExpected: true,
        },
        {
            name:            "within bounds",
            slot:            15,
            isErrorExpected: false,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const count = 4
            kzgCommitmentsInclusionProof := make([][]byte, 0, count)
            for range count {
                kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
            }

            sidecarPb := &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        Slot:       tc.slot,
                        ParentRoot: make([]byte, fieldparams.RootLength),
                        StateRoot:  make([]byte, fieldparams.RootLength),
                        BodyRoot:   make([]byte, fieldparams.RootLength),
                    },
                    Signature: make([]byte, fieldparams.BLSSignatureLength),
                },
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
            }

            sidecar, err := blocks.NewRODataColumn(sidecarPb)
            require.NoError(t, err)

            err = validator(sidecar)
            if tc.isErrorExpected {
                require.NotNil(t, err)
                return
            }

            require.NoError(t, err)
        })
    }
}

func TestIsSidecarIndexRequested(t *testing.T) {
    request := &ethpb.DataColumnSidecarsByRangeRequest{
        Columns: []uint64{2, 9, 4},
    }

    validator := isSidecarIndexRequested(request)

    testCases := []struct {
        name            string
        index           uint64
        isErrorExpected bool
    }{
        {
            name:            "not requested",
            index:           1,
            isErrorExpected: true,
        },
        {
            name:            "requested",
            index:           9,
            isErrorExpected: false,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const count = 4
            kzgCommitmentsInclusionProof := make([][]byte, 0, count)
            for range count {
                kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
            }

            sidecarPb := &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        Slot:       0,
                        ParentRoot: make([]byte, fieldparams.RootLength),
                        StateRoot:  make([]byte, fieldparams.RootLength),
                        BodyRoot:   make([]byte, fieldparams.RootLength),
                    },
                    Signature: make([]byte, fieldparams.BLSSignatureLength),
                },
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
                Index:                        tc.index,
            }

            sidecar, err := blocks.NewRODataColumn(sidecarPb)
            require.NoError(t, err)

            err = validator(sidecar)
            if tc.isErrorExpected {
                require.NotNil(t, err)
                return
            }

            require.NoError(t, err)
        })
    }
}

func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
    nilTestCases := []struct {
        name    string
        request p2ptypes.DataColumnsByRootIdentifiers
    }{
        {
            name:    "nil request",
            request: nil,
        },
        {
            name:    "count is 0",
            request: p2ptypes.DataColumnsByRootIdentifiers{{}, {}},
        },
    }

    for _, tc := range nilTestCases {
        t.Run(tc.name, func(t *testing.T) {
            actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
            require.NoError(t, err)
            require.IsNil(t, actual)
        })
    }

    t.Run("too many columns in request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        beaconConfig := params.BeaconConfig()
        beaconConfig.MaxRequestDataColumnSidecars = 4
        params.OverrideBeaconConfig(beaconConfig)

        request := p2ptypes.DataColumnsByRootIdentifiers{
            {Columns: []uint64{1, 2, 3}},
            {Columns: []uint64{4, 5, 6}},
        }

        _, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
        require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
    })

    type slotIndex struct {
        Slot  primitives.Slot
        Index uint64
    }

    createSidecar := func(rootIndex slotIndex) blocks.RODataColumn {
        const count = 4
        kzgCommitmentsInclusionProof := make([][]byte, 0, count)
        for range count {
            kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
        }

        sidecarPb := &ethpb.DataColumnSidecar{
            Index: rootIndex.Index,
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    ParentRoot: make([]byte, fieldparams.RootLength),
                    StateRoot:  make([]byte, fieldparams.RootLength),
                    BodyRoot:   make([]byte, fieldparams.RootLength),
                },
                Signature: make([]byte, fieldparams.BLSSignatureLength),
            },
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }

        roSidecar, err := blocks.NewRODataColumn(sidecarPb)
        require.NoError(t, err)

        return roSidecar
    }

    testCases := []struct {
        name          string
        slotIndices   []slotIndex
        expectedError error
    }{
        {
            name: "too many responses",
            slotIndices: []slotIndex{
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 2, Index: 1},
                {Slot: 2, Index: 2},
                {Slot: 2, Index: 3},
                {Slot: 1, Index: 3}, // Duplicate
            },
            expectedError: errMaxResponseDataColumnSidecarsExceeded,
        },
        {
            name: "perfect match",
            slotIndices: []slotIndex{
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 2, Index: 1},
                {Slot: 2, Index: 2},
                {Slot: 2, Index: 3},
            },
        },
        {
            name: "fewer responses than the maximum possible",
            slotIndices: []slotIndex{
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 2, Index: 1},
                {Slot: 2, Index: 2},
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
            clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

            p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
            p1.Connect(p2)

            expected := make([]blocks.RODataColumn, 0, len(tc.slotIndices))
            for _, slotIndex := range tc.slotIndices {
                roSidecar := createSidecar(slotIndex)
                expected = append(expected, roSidecar)
            }

            blockRoot1, blockRoot2 := expected[0].BlockRoot(), expected[3].BlockRoot()

            sentRequest := p2ptypes.DataColumnsByRootIdentifiers{
                {BlockRoot: blockRoot1[:], Columns: []uint64{1, 2, 3}},
                {BlockRoot: blockRoot2[:], Columns: []uint64{1, 2, 3}},
            }

            var wg sync.WaitGroup
            wg.Add(1)

            p2.SetStreamHandler(protocol, func(stream network.Stream) {
                wg.Done()

                requestReceived := new(p2ptypes.DataColumnsByRootIdentifiers)
                err := p2.Encoding().DecodeWithMaxLength(stream, requestReceived)
                assert.NoError(t, err)

                require.Equal(t, len(sentRequest), len(*requestReceived))
                for i := range sentRequest {
                    require.DeepSSZEqual(t, (sentRequest)[i], (*requestReceived)[i])
                }

                for _, sidecar := range expected {
                    err := WriteDataColumnSidecarChunk(stream, clock, p2.Encoding(), sidecar.DataColumnSidecar)
                    assert.NoError(t, err)
                }

                err = stream.CloseWrite()
                assert.NoError(t, err)
            })

            ctx := t.Context()
            ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

            actual, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p1, p2.PeerID(), ctxMap, sentRequest)
            if tc.expectedError != nil {
                require.ErrorContains(t, tc.expectedError.Error(), err)
                if util.WaitTimeout(&wg, time.Second) {
                    t.Fatal("Did not receive stream within 1 sec")
                }

                return
            }

            require.Equal(t, len(expected), len(actual))
            for i := range expected {
                require.DeepSSZEqual(t, expected[i], actual[i])
            }
        })
    }
}

func TestIsSidecarIndexRootRequested(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
root [fieldparams.RootLength]byte
|
||||
index uint64
|
||||
isErrorExpected bool
|
||||
}{
|
||||
{
|
||||
name: "non requested root",
|
||||
root: [fieldparams.RootLength]byte{2},
|
||||
isErrorExpected: true,
|
||||
},
|
||||
{
|
||||
name: "non requested index",
|
||||
root: [fieldparams.RootLength]byte{1},
|
||||
index: 3,
|
||||
isErrorExpected: true,
|
||||
},
|
||||
{
|
||||
name: "nominal",
|
||||
root: [fieldparams.RootLength]byte{1},
|
||||
index: 2,
|
||||
isErrorExpected: false,
|
||||
},
|
||||
}
|
||||
|
||||
request := types.DataColumnsByRootIdentifiers{
|
||||
{BlockRoot: []byte{1}, Columns: []uint64{1, 2}},
|
||||
}
|
||||
|
||||
validator := isSidecarIndexRootRequested(request)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const count = 4
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
sidecarPb := ðpb.DataColumnSidecar{
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
Index: tc.index,
|
||||
}
|
||||
|
||||
// There is a discrepancy between `tc.root` and the real root,
|
||||
// but we don't care about it here.
|
||||
sidecar, err := blocks.NewRODataColumnWithRoot(sidecarPb, tc.root)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = validator(sidecar)
|
||||
if tc.isErrorExpected {
|
||||
require.NotNil(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
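
`isSidecarIndexRootRequested` itself is not shown in this diff. For orientation, a minimal sketch that is consistent with the expectations exercised above; the function name suffix, the error messages, and the use of `bytesutil.ToBytes32` are assumptions, not the actual implementation:

// Hypothetical sketch: build a lookup of requested (root, column) pairs and
// return a validator that rejects any sidecar outside of that set.
func isSidecarIndexRootRequestedSketch(request types.DataColumnsByRootIdentifiers) func(blocks.RODataColumn) error {
	requested := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(request))
	for _, ident := range request {
		root := bytesutil.ToBytes32(ident.BlockRoot)
		columns, ok := requested[root]
		if !ok {
			columns = make(map[uint64]bool, len(ident.Columns))
			requested[root] = columns
		}
		for _, column := range ident.Columns {
			columns[column] = true
		}
	}

	return func(sidecar blocks.RODataColumn) error {
		columns, ok := requested[sidecar.BlockRoot()]
		if !ok {
			return errors.New("block root not requested")
		}
		if !columns[sidecar.Index] {
			return errors.New("column index not requested")
		}
		return nil
	}
}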

func TestReadChunkedDataColumnSidecar(t *testing.T) {
	t.Run("non nil status code", func(t *testing.T) {
		const reason = "a dummy reason"

		p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

		var wg sync.WaitGroup
		wg.Add(1)
		p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
			defer wg.Done()

			_, err := readChunkedDataColumnSidecar(stream, p2, nil)
			require.ErrorContains(t, reason, err)
		})

		p1.Connect(p2)

		stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
		require.NoError(t, err)

		writeErrorResponseToStream(responseCodeInvalidRequest, reason, stream, p1)

		if util.WaitTimeout(&wg, time.Second) {
			t.Fatal("Did not receive stream within 1 sec")
		}
	})

	t.Run("unrecognized fork digest", func(t *testing.T) {
		p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

		var wg sync.WaitGroup
		wg.Add(1)
		p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
			defer wg.Done()

			_, err := readChunkedDataColumnSidecar(stream, p2, ContextByteVersions{})
			require.ErrorContains(t, "unrecognized fork digest", err)
		})

		p1.Connect(p2)

		stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
		require.NoError(t, err)

		_, err = stream.Write([]byte{responseCodeSuccess})
		require.NoError(t, err)

		err = writeContextToStream([]byte{42, 42, 42, 42}, stream)
		require.NoError(t, err)

		if util.WaitTimeout(&wg, time.Second) {
			t.Fatal("Did not receive stream within 1 sec")
		}
	})

	t.Run("before fulu", func(t *testing.T) {
		p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

		var wg sync.WaitGroup
		wg.Add(1)
		p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
			defer wg.Done()

			_, err := readChunkedDataColumnSidecar(stream, p2, ContextByteVersions{[4]byte{1, 2, 3, 4}: version.Phase0})
			require.ErrorContains(t, "unexpected context bytes", err)
		})

		p1.Connect(p2)

		stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
		require.NoError(t, err)

		_, err = stream.Write([]byte{responseCodeSuccess})
		require.NoError(t, err)

		err = writeContextToStream([]byte{1, 2, 3, 4}, stream)
		require.NoError(t, err)

		if util.WaitTimeout(&wg, time.Second) {
			t.Fatal("Did not receive stream within 1 sec")
		}
	})

	t.Run("one validation failed", func(t *testing.T) {
		const reason = "a dummy reason"

		p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

		var wg sync.WaitGroup
		wg.Add(1)
		p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
			defer wg.Done()

			validationOne := func(column blocks.RODataColumn) error {
				return nil
			}

			validationTwo := func(column blocks.RODataColumn) error {
				return errors.New(reason)
			}

			_, err := readChunkedDataColumnSidecar(
				stream,
				p2,
				ContextByteVersions{[4]byte{1, 2, 3, 4}: version.Fulu},
				validationOne, // OK
				validationTwo, // Fail
			)

			require.ErrorContains(t, reason, err)
		})

		p1.Connect(p2)

		stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
		require.NoError(t, err)

		const count = 4
		kzgCommitmentsInclusionProof := make([][]byte, 0, count)
		for range count {
			kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
		}

		// Success response code.
		_, err = stream.Write([]byte{responseCodeSuccess})
		require.NoError(t, err)

		// Fork digest.
		err = writeContextToStream([]byte{1, 2, 3, 4}, stream)
		require.NoError(t, err)

		// Sidecar.
		_, err = p1.Encoding().EncodeWithMaxLength(stream, &ethpb.DataColumnSidecar{
			SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
				Header: &ethpb.BeaconBlockHeader{
					ParentRoot: make([]byte, fieldparams.RootLength),
					StateRoot:  make([]byte, fieldparams.RootLength),
					BodyRoot:   make([]byte, fieldparams.RootLength),
				},
				Signature: make([]byte, fieldparams.BLSSignatureLength),
			},
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		})
		require.NoError(t, err)

		if util.WaitTimeout(&wg, time.Minute) {
			t.Fatal("Did not receive stream within 1 min")
		}
	})

	t.Run("nominal", func(t *testing.T) {
		p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

		const count = 4
		kzgCommitmentsInclusionProof := make([][]byte, 0, count)
		for range count {
			kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
		}

		expected := &ethpb.DataColumnSidecar{
			SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
				Header: &ethpb.BeaconBlockHeader{
					ParentRoot: make([]byte, fieldparams.RootLength),
					StateRoot:  make([]byte, fieldparams.RootLength),
					BodyRoot:   make([]byte, fieldparams.RootLength),
				},
				Signature: make([]byte, fieldparams.BLSSignatureLength),
			},
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		var wg sync.WaitGroup
		wg.Add(1)
		p2.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1, func(stream network.Stream) {
			defer wg.Done()

			actual, err := readChunkedDataColumnSidecar(stream, p2, ContextByteVersions{[4]byte{1, 2, 3, 4}: version.Fulu})
			require.NoError(t, err)
			require.DeepSSZEqual(t, expected, actual.DataColumnSidecar)
		})

		p1.Connect(p2)

		stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), p2p.RPCDataColumnSidecarsByRootTopicV1)
		require.NoError(t, err)

		// Success response code.
		_, err = stream.Write([]byte{responseCodeSuccess})
		require.NoError(t, err)

		// Fork digest.
		err = writeContextToStream([]byte{1, 2, 3, 4}, stream)
		require.NoError(t, err)

		// Sidecar.
		_, err = p1.Encoding().EncodeWithMaxLength(stream, expected)
		require.NoError(t, err)

		if util.WaitTimeout(&wg, time.Minute) {
			t.Fatal("Did not receive stream within 1 min")
		}
	})
}
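
Taken together, these cases pin down the chunk wire format that `readChunkedDataColumnSidecar` consumes: one status byte, a 4-byte fork-digest context, then the length-prefixed encoded sidecar, with the supplied validation functions applied afterwards. A condensed sketch of the writer side as the tests drive it; the `encoder.NetworkEncoding` parameter type is an assumption based on `p2.Encoding()`:

// Sketch: emit one successful chunk exactly the way the "nominal" test does.
func writeSidecarChunkSketch(stream network.Stream, enc encoder.NetworkEncoding, forkDigest []byte, sidecar *ethpb.DataColumnSidecar) error {
	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
		return err // status byte
	}
	if err := writeContextToStream(forkDigest, stream); err != nil {
		return err // 4-byte fork digest context
	}
	if _, err := enc.EncodeWithMaxLength(stream, sidecar); err != nil {
		return err // length-prefixed ssz_snappy payload
	}
	return nil
}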

@@ -10,6 +10,8 @@ import (
	"time"

	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/crypto/rand"
	lru "github.com/hashicorp/golang-lru"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -103,12 +105,15 @@ type config struct {
	stateNotifier      statefeed.Notifier
	blobStorage        *filesystem.BlobStorage
	dataColumnStorage  *filesystem.DataColumnStorage
	custodyInfo        *peerdas.CustodyInfo
	batchVerifierLimit int
}

// This defines the interface for interacting with block chain service
type blockchainService interface {
	blockchain.BlockReceiver
	blockchain.BlobReceiver
	blockchain.DataColumnReceiver
	blockchain.HeadFetcher
	blockchain.FinalizationFetcher
	blockchain.ForkFetcher
@@ -166,6 +171,8 @@ type Service struct {
	newBlobVerifier       verification.NewBlobVerifier
	newColumnsVerifier    verification.NewDataColumnsVerifier
	availableBlocker      coverage.AvailableBlocker
	reconstructionLock    sync.Mutex
	reconstructionRandGen *rand.Rand
	ctxMap                ContextByteVersions
	slasherEnabled        bool
	lcStore               *lightClient.Store
@@ -176,15 +183,15 @@ type Service struct {
func NewService(ctx context.Context, opts ...Option) *Service {
	ctx, cancel := context.WithCancel(ctx)
	r := &Service{
		ctx:                  ctx,
		cancel:               cancel,
		chainStarted:         abool.New(),
		cfg:                  &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
		slotToPendingBlocks:  gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
		seenPendingBlocks:    make(map[[32]byte]bool),
		blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
		signatureChan:        make(chan *signatureVerifier, verifierLimit),
		dataColumnLogCh:      make(chan dataColumnLogEntry, 1000),
		ctx:                   ctx,
		cancel:                cancel,
		chainStarted:          abool.New(),
		cfg:                   &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
		slotToPendingBlocks:   gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
		seenPendingBlocks:     make(map[[32]byte]bool),
		blkRootToPendingAtts:  make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
		dataColumnLogCh:       make(chan dataColumnLogEntry, 1000),
		reconstructionRandGen: rand.NewGenerator(),
	}

	for _, opt := range opts {
@@ -192,6 +199,8 @@ func NewService(ctx context.Context, opts ...Option) *Service {
			return nil
		}
	}
	// Initialize signature channel with configured limit
	r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
	// Correctly remove it from our seen pending block map.
	// The eviction method always assumes that the mutex is held.
	r.slotToPendingBlocks.OnEvicted(func(s string, i interface{}) {

@@ -6,12 +6,14 @@ import (
	"fmt"
	"reflect"
	"runtime/debug"
	"slices"
	"strings"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
@@ -188,14 +190,14 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
		)
	}

	// New gossip topic in Fulu
	// New gossip topic in Fulu.
	if params.BeaconConfig().FuluForkEpoch <= epoch {
		s.subscribeWithParameters(
			p2p.DataColumnSubnetTopicFormat,
			s.validateDataColumn,
			func(context.Context, proto.Message) error { return nil },
			s.dataColumnSubscriber,
			digest,
			func(primitives.Slot) []uint64 { return nil },
			s.dataColumnSubnetIndices,
			func(currentSlot primitives.Slot) []uint64 { return []uint64{} },
		)
	}
@@ -600,6 +602,19 @@ func (s *Service) enoughPeersAreConnected(subnetTopic string) bool {
	return peersWithSubnetCount >= threshold
}

func (s *Service) dataColumnSubnetIndices(_ primitives.Slot) []uint64 {
	nodeID := s.cfg.p2p.NodeID()
	custodyGroupCount := s.cfg.custodyInfo.CustodyGroupSamplingSize(peerdas.Target)

	nodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		log.WithError(err).Error("Could not retrieve peer info")
		return []uint64{}
	}

	return sliceFromMap(nodeInfo.DataColumnsSubnets, true /*sorted*/)
}

func (s *Service) persistentAndAggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
	if flags.Get().SubscribeToAllSubnets {
		return sliceFromCount(params.BeaconConfig().AttestationSubnetCount)
@@ -727,3 +742,17 @@ func errorIsIgnored(err error) bool {
	}
	return false
}

// sliceFromMap returns the keys of a map, sorted when requested.
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
	result := make([]uint64, 0, len(m))
	for k := range m {
		result = append(result, k)
	}

	if len(sorted) > 0 && sorted[0] {
		slices.Sort(result)
	}

	return result
}
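
A quick illustration of the new helper's behavior; the map literal below is made up for the example:

subnets := map[uint64]bool{7: true, 2: true, 5: true}

_ = sliceFromMap(subnets)                  // key order follows Go's random map iteration, e.g. [7 2 5]
_ = sliceFromMap(subnets, true /*sorted*/) // always [2 5 7]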

beacon-chain/sync/subscriber_data_column_sidecar.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package sync

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
	opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/pkg/errors"
	"google.golang.org/protobuf/proto"
)

func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) error {
	sidecar, ok := msg.(blocks.VerifiedRODataColumn)
	if !ok {
		return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
	}

	if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
		return errors.Wrap(err, "receive data column")
	}

	slot := sidecar.Slot()
	proposerIndex := sidecar.ProposerIndex()
	root := sidecar.BlockRoot()

	if err := s.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root); err != nil {
		return errors.Wrap(err, "reconstruct data columns")
	}

	return nil
}

func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
	slot := sidecar.SignedBlockHeader.Header.Slot
	proposerIndex := sidecar.SignedBlockHeader.Header.ProposerIndex
	columnIndex := sidecar.Index

	s.setSeenDataColumnIndex(slot, proposerIndex, columnIndex)

	if err := s.cfg.chain.ReceiveDataColumn(sidecar); err != nil {
		return errors.Wrap(err, "receive data column")
	}

	s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
		Type: opfeed.DataColumnSidecarReceived,
		Data: &opfeed.DataColumnSidecarReceivedData{
			DataColumn: &sidecar,
		},
	})

	return nil
}

@@ -15,10 +15,12 @@ func TestMain(m *testing.M) {

	resetFlags := flags.Get()
	flags.Init(&flags.GlobalFlags{
		BlockBatchLimit:            64,
		BlockBatchLimitBurstFactor: 10,
		BlobBatchLimit:             32,
		BlobBatchLimitBurstFactor:  2,
		BlockBatchLimit:                 64,
		BlockBatchLimitBurstFactor:      10,
		BlobBatchLimit:                  32,
		BlobBatchLimitBurstFactor:       2,
		DataColumnBatchLimit:            4096,
		DataColumnBatchLimitBurstFactor: 2,
	})
	defer func() {
		flags.Init(resetFlags)

@@ -30,16 +30,16 @@ var (
	ErrBlobIndexInvalid = errors.Join(ErrBlobInvalid, errors.New("incorrect blob sidecar index"))

	// errFromFutureSlot means RequireSlotNotTooEarly failed.
	errFromFutureSlot = errors.Join(ErrBlobInvalid, errors.New("slot is too far in the future"))
	errFromFutureSlot = errors.New("slot is too far in the future")

	// errSlotNotAfterFinalized means RequireSlotAboveFinalized failed.
	errSlotNotAfterFinalized = errors.Join(ErrBlobInvalid, errors.New("slot <= finalized checkpoint"))
	errSlotNotAfterFinalized = errors.New("slot <= finalized checkpoint")

	// ErrInvalidProposerSignature means RequireValidProposerSignature failed.
	ErrInvalidProposerSignature = errors.Join(ErrBlobInvalid, errors.New("proposer signature could not be verified"))

	// errSidecarParentNotSeen means RequireSidecarParentSeen failed.
	errSidecarParentNotSeen = errors.Join(ErrBlobInvalid, errors.New("parent root has not been seen"))
	errSidecarParentNotSeen = errors.New("parent root has not been seen")

	// errSidecarParentInvalid means RequireSidecarParentValid failed.
	errSidecarParentInvalid = errors.Join(ErrBlobInvalid, errors.New("parent block is not valid"))

changelog/bastin_fix-lc-versioning.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fixed the versioning bug for light client data types in the Beacon API.

changelog/bastin_put-lc-store-behind-flag.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Put the initialization of the LC Store behind the `enable-light-client` flag.

changelog/bastin_refactor-lc-bootstrap-tests.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Refactor light client bootstrap tests in the RPC package.

changelog/bastin_refactor-lc-kv-tests.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Refactor light client kv tests.

changelog/james-prysm_fix-duties-v2-assignment.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fix a panic in dutiesv2 when there is no committee assignment for the epoch.

changelog/james-prysm_ssz-validator-block.md (new file, 7 lines)
@@ -0,0 +1,7 @@
### Added

- New `ssz-only` flag for the validator client to enable calling REST APIs in SSZ, starting with the get block endpoint.

### Changed

- When the REST API is enabled, the get block API defaults to requesting and receiving SSZ instead of JSON; JSON is the fallback.

changelog/manu-bootnodes.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Fixed
- `--chain-config-file`: Do not use mainnet boot nodes anymore.

changelog/manu-peerdas-beacon-api.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Implement the beacon API blob sidecar endpoint for Fulu.

changelog/manu-peerdas-columns-by-range-handler.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Implement `dataColumnSidecarsByRangeRPCHandler`.

@@ -0,0 +1,2 @@
### Fixed
- Non-deterministic output order of `dataColumnSidecarByRootRPCHandler`.

changelog/manu-peerdas-get-blobs-V2.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added
- Implement engine method `GetBlobsV2`.
- Implement execution `ReconstructDataColumnSidecars`, which reconstructs data column sidecars from data fetched from the execution layer.

changelog/manu-peerdas-metadata.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- PeerDAS: Implement the new Fulu Metadata.

changelog/manu-peerdas-reconstruction.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- PeerDAS: Implement reconstruction.

changelog/manu-peerdas-send-data-column-requests.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added
- Implement `SendDataColumnSidecarsByRangeRequest`.
- Implement `SendDataColumnSidecarsByRootRequest`.

changelog/manu-peerdas-variou.md (new file, 9 lines)
@@ -0,0 +1,9 @@
### Changed
- In `TopicFromMessage`: Do not assume anymore that all Fulu-specific topics are V3 only.
- `readChunkedDataColumnSidecar`: Add a `validationFunctions` parameter and add tests.

### Added
- New `StatusV2` proto message.

### Removed
- Unused `DataColumnIdentifier` proto message.

changelog/tt_45.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Increase the mainnet DefaultBuilderGasLimit from 36M to 45M.

changelog/tt_formula_mlk.md (new file, 7 lines)
@@ -0,0 +1,7 @@
### Changed

- Attesting timely is now the default. The `attest-timely` flag is now deprecated.

### Added

- `disable-attest-timely` flag to disable timely attesting.

changelog/tt_noodles.md (new file, 7 lines)
@@ -0,0 +1,7 @@
### Added

- New `--batch-verifier-limit` flag to configure the maximum number of signatures to batch-verify on gossip.

### Changed

- Default batch signature verification limit increased from 50 to 1000.

changelog/tt_steak.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Remove "invalid" from logs for an incoming blob sidecar that is missing its parent or has an out-of-range slot.
@@ -212,11 +212,18 @@ var (
		Usage: "The factor by which blob batch limit may increase on burst.",
		Value: 3,
	}
	// DataColumnBatchLimit specifies the requested data column batch size.
	DataColumnBatchLimit = &cli.IntFlag{
		Name:  "data-column-batch-limit",
		Usage: "The amount of data columns the local peer is bounded to request and respond to in a batch.",
		Value: 4096,
	}
	// DataColumnBatchLimitBurstFactor specifies the factor by which data column batch size may increase.
	DataColumnBatchLimitBurstFactor = &cli.IntFlag{
		Name:  "data-column-batch-limit-burst-factor",
		Usage: "The factor by which data column batch limit may increase on burst.",
		Value: 2,
	}
	// DisableDebugRPCEndpoints disables the debug Beacon API namespace.
	DisableDebugRPCEndpoints = &cli.BoolFlag{
		Name: "disable-debug-rpc-endpoints",
@@ -331,4 +338,10 @@ var (
		Name:  "subscribe-all-data-subnets",
		Usage: "Enable subscription to all data subnets.",
	}
	// BatchVerifierLimit sets the maximum number of signatures to batch verify at once.
	BatchVerifierLimit = &cli.IntFlag{
		Name:  "batch-verifier-limit",
		Usage: "Maximum number of signatures to batch verify at once for beacon attestation p2p gossip.",
		Value: 1000,
	}
)

@@ -8,16 +8,17 @@ import (
// GlobalFlags specifies all the global flags for the
// beacon node.
type GlobalFlags struct {
	SubscribeToAllSubnets      bool
	SubscribeAllDataSubnets    bool
	MinimumSyncPeers           int
	MinimumPeersPerSubnet      int
	MaxConcurrentDials         int
	BlockBatchLimit            int
	BlockBatchLimitBurstFactor int
	BlobBatchLimit             int
	DataColumnBatchLimit       int
	BlobBatchLimitBurstFactor  int
	SubscribeToAllSubnets           bool
	SubscribeAllDataSubnets         bool
	MinimumSyncPeers                int
	MinimumPeersPerSubnet           int
	MaxConcurrentDials              int
	BlockBatchLimit                 int
	BlockBatchLimitBurstFactor      int
	BlobBatchLimit                  int
	BlobBatchLimitBurstFactor       int
	DataColumnBatchLimit            int
	DataColumnBatchLimitBurstFactor int
}

var globalConfig *GlobalFlags
@@ -55,8 +56,10 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
	cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
	cfg.BlobBatchLimitBurstFactor = ctx.Int(BlobBatchLimitBurstFactor.Name)
	cfg.DataColumnBatchLimit = ctx.Int(DataColumnBatchLimit.Name)
	cfg.DataColumnBatchLimitBurstFactor = ctx.Int(DataColumnBatchLimitBurstFactor.Name)
	cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
	cfg.MaxConcurrentDials = ctx.Int(MaxConcurrentDials.Name)

	configureMinimumPeers(ctx, cfg)

	Init(cfg)
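
How the limit/burst pair is consumed is outside this diff. As a rough, assumption-laden sketch of how a limit plus burst factor conventionally maps onto a token bucket; the use of `golang.org/x/time/rate` here is illustrative, not necessarily Prysm's actual wiring:

// Illustrative only: refill at DataColumnBatchLimit tokens per second and
// allow bursts of limit*burstFactor (4096 and 8192 with the defaults above).
func dataColumnLimiterSketch() *rate.Limiter {
	limit := flags.Get().DataColumnBatchLimit
	burst := limit * flags.Get().DataColumnBatchLimitBurstFactor
	return rate.NewLimiter(rate.Limit(limit), burst)
}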

@@ -149,6 +149,7 @@ var appFlags = []cli.Flag{
	bflags.BackfillBatchSize,
	bflags.BackfillWorkerCount,
	bflags.BackfillOldestSlot,
	flags.BatchVerifierLimit,
}

func init() {

@@ -72,6 +72,7 @@ var appHelpFlagGroups = []flagGroup{
			flags.NetworkID,
			flags.RPCHost,
			flags.RPCPort,
			flags.BatchVerifierLimit,
		},
	},
	{

@@ -52,6 +52,7 @@ type Flags struct {
	EnableExperimentalAttestationPool bool // EnableExperimentalAttestationPool enables an experimental attestation pool design.
	EnableDutiesV2                    bool // EnableDutiesV2 sets validator client to use the get Duties V2 endpoint
	EnableWeb                         bool // EnableWeb enables the webui on the validator client
	SSZOnly                           bool // SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled (useful for debugging)
	// Logging related toggles.
	DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
	EnableFullSSZDataLogging  bool // Enables logging for full ssz data on rejected gossip messages
@@ -155,7 +156,8 @@ func configureTestnet(ctx *cli.Context) error {
		params.UseHoodiNetworkConfig()
	} else {
		if ctx.IsSet(cmd.ChainConfigFileFlag.Name) {
			log.Warn("Running on custom Ethereum network specified in a chain configuration yaml file")
			log.Warning("Running on custom Ethereum network specified in a chain configuration YAML file")
			params.UseCustomNetworkConfig()
		} else {
			log.Info("Running on Ethereum Mainnet")
		}
@@ -167,11 +169,11 @@ func configureTestnet(ctx *cli.Context) error {
}

// Insert feature flags within the function to be enabled for Sepolia testnet.
func applySepoliaFeatureFlags(ctx *cli.Context) {
func applySepoliaFeatureFlags(_ *cli.Context) {
}

// Insert feature flags within the function to be enabled for Holesky testnet.
func applyHoleskyFeatureFlags(ctx *cli.Context) {
func applyHoleskyFeatureFlags(_ *cli.Context) {
}

// ConfigureBeaconChain sets the global config based
@@ -316,9 +318,10 @@ func ConfigureValidator(ctx *cli.Context) error {
		logEnabled(writeWalletPasswordOnWebOnboarding)
		cfg.WriteWalletPasswordOnWebOnboarding = true
	}
	if ctx.Bool(attestTimely.Name) {
		logEnabled(attestTimely)
		cfg.AttestTimely = true
	cfg.AttestTimely = true
	if ctx.Bool(disableAttestTimely.Name) {
		logEnabled(disableAttestTimely)
		cfg.AttestTimely = false
	}
	if ctx.Bool(enableSlashingProtectionPruning.Name) {
		logEnabled(enableSlashingProtectionPruning)
@@ -344,6 +347,11 @@ func ConfigureValidator(ctx *cli.Context) error {
		logEnabled(EnableWebFlag)
		cfg.EnableWeb = true
	}
	if ctx.Bool(SSZOnly.Name) {
		logEnabled(SSZOnly)
		cfg.SSZOnly = true
	}

	cfg.KeystoreImportDebounceInterval = ctx.Duration(dynamicKeyReloadDebounceInterval.Name)
	Init(cfg)
	return nil

@@ -98,6 +98,11 @@ var (
		Usage:  deprecatedUsage,
		Hidden: true,
	}
	deprecatedAttestTimely = &cli.BoolFlag{
		Name:   "attest-timely",
		Usage:  deprecatedUsage,
		Hidden: true,
	}
)

// Deprecated flags for both the beacon node and validator client.
@@ -118,6 +123,7 @@ var deprecatedFlags = []cli.Flag{
	deprecatedEnableCommitteeAwarePacking,
	deprecatedInteropGenesisTimeFlag,
	deprecatedEnableQuic,
	deprecatedAttestTimely,
}

var upcomingDeprecation = []cli.Flag{

@@ -91,9 +91,9 @@ var (
		Name:  "disable-broadcast-slashings",
		Usage: "Disables broadcasting slashings submitted to the beacon node.",
	}
	attestTimely = &cli.BoolFlag{
		Name:  "attest-timely",
		Usage: "Fixes validator can attest timely after current block processes. See #8185 for more details.",
	disableAttestTimely = &cli.BoolFlag{
		Name:  "disable-attest-timely",
		Usage: "Disable validator attesting timely after current block processes. See #8185 for more details.",
	}
	enableSlashingProtectionPruning = &cli.BoolFlag{
		Name: "enable-slashing-protection-history-pruning",
@@ -201,6 +201,12 @@ var (
		Usage: "(Work in progress): Enables the web portal for the validator client.",
		Value: false,
	}

	// SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled
	SSZOnly = &cli.BoolFlag{
		Name:  "ssz-only",
		Usage: "(debug): Forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled",
	}
)

// devModeFlags holds list of flags that are set when development mode is on.
@@ -216,13 +222,14 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
	HoodiTestnet,
	Mainnet,
	dynamicKeyReloadDebounceInterval,
	attestTimely,
	disableAttestTimely,
	enableSlashingProtectionPruning,
	EnableMinimalSlashingProtection,
	enableDoppelGangerProtection,
	EnableBeaconRESTApi,
	EnableDutiesV2,
	EnableWebFlag,
	SSZOnly,
}...)

// E2EValidatorFlags contains a list of the validator feature flags to be tested in E2E.

@@ -14,6 +14,7 @@ go_library(
        "mainnet_config.go",
        "minimal_config.go",
        "network_config.go",
        "testnet_custom_network_config.go",
        "testnet_e2e_config.go",
        "testnet_holesky_config.go",
        "testnet_hoodi_config.go",

@@ -268,7 +268,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
	BytesPerLogsBloom: 256,
	MaxExtraDataBytes: 32,
	EthBurnAddressHex: "0x0000000000000000000000000000000000000000",
	DefaultBuilderGasLimit: uint64(36000000),
	DefaultBuilderGasLimit: uint64(45000000),

	// Mevboost circuit breaker
	MaxBuilderConsecutiveMissedSlots: 3,

config/params/testnet_custom_network_config.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package params

func UseCustomNetworkConfig() {
	cfg := BeaconNetworkConfig().Copy()
	cfg.ContractDeploymentBlock = 0
	cfg.BootstrapNodes = []string{}

	OverrideBeaconNetworkConfig(cfg)
}
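
Tying this back to the `manu-bootnodes` changelog entry above: once `UseCustomNetworkConfig` runs, a custom chain carries no mainnet boot nodes. Illustrative usage of the function shown in the new file:

params.UseCustomNetworkConfig()
cfg := params.BeaconNetworkConfig()
fmt.Println(len(cfg.BootstrapNodes))     // 0, so no mainnet boot nodes are dialed
fmt.Println(cfg.ContractDeploymentBlock) // 0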

@@ -4,7 +4,7 @@
    "fee_recipient": "0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3",
    "builder": {
      "enabled": true,
      "gas_limit": "36000000"
      "gas_limit": "45000000"
    }
  },
  "0xb057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7b": {

@@ -9,4 +9,4 @@ default_config:
  fee_recipient: '0x6e35733c5af9B61374A128e6F85f553aF09ff89A'
  builder:
    enabled: false
    gas_limit: '36000000'
    gas_limit: '45000000'

@@ -2,7 +2,6 @@ package blocks

import (
	"bytes"
	"errors"

	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
@@ -10,6 +9,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/encoding/ssz"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	"github.com/pkg/errors"
	fastssz "github.com/prysmaticlabs/fastssz"
	"google.golang.org/protobuf/proto"
)
@@ -40,8 +40,10 @@ func NewWrappedExecutionData(v proto.Message) (interfaces.ExecutionData, error)
	case *enginev1.ExecutionBundleElectra:
		// note: no payload changes in electra so using deneb
		return WrappedExecutionPayloadDeneb(pbStruct.Payload)
	case *enginev1.ExecutionBundleFulu:
		return WrappedExecutionPayloadDeneb(pbStruct.Payload)
	default:
		return nil, ErrUnsupportedVersion
		return nil, errors.Wrapf(ErrUnsupportedVersion, "type %T", pbStruct)
	}
}

@@ -5,6 +5,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	"github.com/pkg/errors"
	"google.golang.org/protobuf/proto"
)

@@ -12,7 +13,7 @@ import (
// GetPayloadResponseV(1|2|3|4) value.
type GetPayloadResponse struct {
	ExecutionData   interfaces.ExecutionData
	BlobsBundle     *pb.BlobsBundle
	BlobsBundler    pb.BlobsBundler
	OverrideBuilder bool
	// todo: should we convert this to Gwei up front?
	Bid primitives.Wei
@@ -24,6 +25,10 @@ type bundleGetter interface {
	GetBlobsBundle() *pb.BlobsBundle
}

type bundleV2Getter interface {
	GetBlobsBundle() *pb.BlobsBundleV2
}

// bidValueGetter is an interface satisfied by get payload responses that have a bid value.
type bidValueGetter interface {
	GetValue() []byte
@@ -41,10 +46,13 @@ func NewGetPayloadResponse(msg proto.Message) (*GetPayloadResponse, error) {
	r := &GetPayloadResponse{}
	bundleGetter, hasBundle := msg.(bundleGetter)
	if hasBundle {
		r.BlobsBundle = bundleGetter.GetBlobsBundle()
		r.BlobsBundler = bundleGetter.GetBlobsBundle()
	}
	bundleV2Getter, hasBundle := msg.(bundleV2Getter)
	if hasBundle {
		r.BlobsBundler = bundleV2Getter.GetBlobsBundle()
	}
	bidValueGetter, hasBid := msg.(bidValueGetter)
	executionRequestsGetter, hasExecutionRequests := msg.(executionRequestsGetter)
	wei := primitives.ZeroWei()
	if hasBid {
		// The protobuf types that engine api responses unmarshal into store their values in little endian form.
@@ -60,13 +68,15 @@ func NewGetPayloadResponse(msg proto.Message) (*GetPayloadResponse, error) {
	}
	ed, err := NewWrappedExecutionData(msg)
	if err != nil {
		return nil, err
		return nil, errors.Wrap(err, "new wrapped execution data")
	}
	r.ExecutionData = ed

	executionRequestsGetter, hasExecutionRequests := msg.(executionRequestsGetter)
	if hasExecutionRequests {
		requests, err := executionRequestsGetter.GetDecodedExecutionRequests(params.BeaconConfig().ExecutionRequestLimits())
		if err != nil {
			return nil, err
			return nil, errors.Wrap(err, "get decoded execution requests")
		}
		r.ExecutionRequests = requests
	}
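
`pb.BlobsBundler` itself is not shown in this diff. Judging from the two getters above, it is presumably a small interface that both `*pb.BlobsBundle` and `*pb.BlobsBundleV2` satisfy; a hypothetical sketch, with the method set assumed from the protobuf field names rather than confirmed by the patch:

// Hypothetical: the common surface of BlobsBundle and BlobsBundleV2.
type BlobsBundler interface {
	GetKzgCommitments() [][]byte
	GetProofs() [][]byte
	GetBlobs() [][]byte
}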

@@ -8,6 +8,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"google.golang.org/protobuf/proto"
)

@@ -83,7 +84,7 @@ func (h *bootstrapAltair) SizeSSZ() int {
}

func (h *bootstrapAltair) Version() int {
	return version.Altair
	return slots.ToForkVersion(h.header.Beacon().Slot)
}

func (h *bootstrapAltair) Proto() proto.Message {
@@ -188,7 +189,7 @@ func (h *bootstrapCapella) SizeSSZ() int {
}

func (h *bootstrapCapella) Version() int {
	return version.Capella
	return slots.ToForkVersion(h.header.Beacon().Slot)
}

func (h *bootstrapCapella) Proto() proto.Message {
@@ -293,7 +294,7 @@ func (h *bootstrapDeneb) SizeSSZ() int {
}

func (h *bootstrapDeneb) Version() int {
	return version.Deneb
	return slots.ToForkVersion(h.header.Beacon().Slot)
}

func (h *bootstrapDeneb) Proto() proto.Message {
@@ -398,7 +399,7 @@ func (h *bootstrapElectra) SizeSSZ() int {
}

func (h *bootstrapElectra) Version() int {
	return version.Electra
	return slots.ToForkVersion(h.header.Beacon().Slot)
}

func (h *bootstrapElectra) Proto() proto.Message {
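
Across this and the light-client wrappers that follow, every hardcoded `version.X` return is replaced by `slots.ToForkVersion` on the relevant header slot, so the reported version tracks the fork actually active at that slot (the versioning fix from the `bastin_fix-lc-versioning` changelog). `slots.ToForkVersion` is not shown here; a plausible sketch of the mapping it performs, assuming the usual fork-epoch fields on the beacon config:

// Hypothetical sketch: resolve the fork version active at a given slot by
// comparing its epoch against the configured fork epochs, newest first.
func toForkVersionSketch(slot primitives.Slot) int {
	epoch := slots.ToEpoch(slot)
	cfg := params.BeaconConfig()
	switch {
	case epoch >= cfg.FuluForkEpoch:
		return version.Fulu
	case epoch >= cfg.ElectraForkEpoch:
		return version.Electra
	case epoch >= cfg.DenebForkEpoch:
		return version.Deneb
	case epoch >= cfg.CapellaForkEpoch:
		return version.Capella
	case epoch >= cfg.BellatrixForkEpoch:
		return version.Bellatrix
	case epoch >= cfg.AltairForkEpoch:
		return version.Altair
	default:
		return version.Phase0
	}
}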

@@ -8,7 +8,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"google.golang.org/protobuf/proto"
)

@@ -174,7 +174,7 @@ func (u *finalityUpdateAltair) Proto() proto.Message {
}

func (u *finalityUpdateAltair) Version() int {
	return version.Altair
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *finalityUpdateAltair) AttestedHeader() interfaces.LightClientHeader {
@@ -286,7 +286,7 @@ func (u *finalityUpdateCapella) Proto() proto.Message {
}

func (u *finalityUpdateCapella) Version() int {
	return version.Capella
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *finalityUpdateCapella) AttestedHeader() interfaces.LightClientHeader {
@@ -398,7 +398,7 @@ func (u *finalityUpdateDeneb) Proto() proto.Message {
}

func (u *finalityUpdateDeneb) Version() int {
	return version.Deneb
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *finalityUpdateDeneb) AttestedHeader() interfaces.LightClientHeader {
@@ -511,7 +511,7 @@ func (u *finalityUpdateElectra) Proto() proto.Message {
}

func (u *finalityUpdateElectra) Version() int {
	return version.Electra
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *finalityUpdateElectra) AttestedHeader() interfaces.LightClientHeader {

@@ -7,7 +7,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"google.golang.org/protobuf/proto"
)

@@ -139,7 +139,7 @@ func (u *optimisticUpdateAltair) Proto() proto.Message {
}

func (u *optimisticUpdateAltair) Version() int {
	return version.Altair
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *optimisticUpdateAltair) AttestedHeader() interfaces.LightClientHeader {
@@ -223,7 +223,7 @@ func (u *optimisticUpdateCapella) Proto() proto.Message {
}

func (u *optimisticUpdateCapella) Version() int {
	return version.Capella
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *optimisticUpdateCapella) AttestedHeader() interfaces.LightClientHeader {
@@ -307,7 +307,7 @@ func (u *optimisticUpdateDeneb) Proto() proto.Message {
}

func (u *optimisticUpdateDeneb) Version() int {
	return version.Deneb
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *optimisticUpdateDeneb) AttestedHeader() interfaces.LightClientHeader {

@@ -9,6 +9,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"google.golang.org/protobuf/proto"
)

@@ -105,7 +106,7 @@ func (u *updateAltair) Proto() proto.Message {
}

func (u *updateAltair) Version() int {
	return version.Altair
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *updateAltair) AttestedHeader() interfaces.LightClientHeader {
@@ -272,7 +273,7 @@ func (u *updateCapella) Proto() proto.Message {
}

func (u *updateCapella) Version() int {
	return version.Capella
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *updateCapella) AttestedHeader() interfaces.LightClientHeader {
@@ -439,7 +440,7 @@ func (u *updateDeneb) Proto() proto.Message {
}

func (u *updateDeneb) Version() int {
	return version.Deneb
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *updateDeneb) AttestedHeader() interfaces.LightClientHeader {
@@ -607,7 +608,7 @@ func (u *updateElectra) Proto() proto.Message {
}

func (u *updateElectra) Version() int {
	return version.Electra
	return slots.ToForkVersion(u.attestedHeader.Beacon().Slot)
}

func (u *updateElectra) AttestedHeader() interfaces.LightClientHeader {

@@ -2,14 +2,13 @@
# Common
##############################################################################

load("@rules_proto//proto:defs.bzl", "proto_library")

##############################################################################
# Go
##############################################################################
# gazelle:ignore
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("@rules_proto//proto:defs.bzl", "proto_library")
load("//proto:ssz_proto_library.bzl", "ssz_proto_files")
load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")

@@ -189,6 +188,7 @@ ssz_fulu_objs = [
    "DataColumnIdentifier",
    "DataColumnsByRootIdentifier",
    "DataColumnSidecar",
    "StatusV2",
    "SignedBeaconBlockContentsFulu",
    "SignedBeaconBlockFulu",
    "SignedBlindedBeaconBlockFulu",
@@ -359,15 +359,17 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1",
    visibility = ["//visibility:public"],
    deps = SSZ_DEPS + [
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/ext:go_default_library",
        "//runtime/version:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_golang_protobuf//proto:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",  # keep
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@googleapis//google/api:annotations_go_proto",
        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
        "@io_bazel_rules_go//proto/wkt:empty_go_proto",
@@ -382,8 +384,6 @@ go_library(
        "@org_golang_google_protobuf//runtime/protoimpl:go_default_library",
        "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)

@@ -5,6 +5,12 @@ import (
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
)

// GenericConverter defines any struct that can be converted to a generic beacon block.
// We assume all versioned block structs implement this method.
type GenericConverter interface {
	ToGeneric() (*GenericBeaconBlock, error)
}

// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------

proto/prysm/v1alpha1/data_columns.pb.go (generated, 125 lines)
@@ -109,61 +109,6 @@ func (x *DataColumnSidecar) GetKzgCommitmentsInclusionProof() [][]byte {
	return nil
}

type DataColumnIdentifier struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
	Index     uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"`
}

func (x *DataColumnIdentifier) Reset() {
	*x = DataColumnIdentifier{}
	if protoimpl.UnsafeEnabled {
		mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *DataColumnIdentifier) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DataColumnIdentifier) ProtoMessage() {}

func (x *DataColumnIdentifier) ProtoReflect() protoreflect.Message {
	mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DataColumnIdentifier.ProtoReflect.Descriptor instead.
func (*DataColumnIdentifier) Descriptor() ([]byte, []int) {
	return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{1}
}

func (x *DataColumnIdentifier) GetBlockRoot() []byte {
	if x != nil {
		return x.BlockRoot
	}
	return nil
}

func (x *DataColumnIdentifier) GetIndex() uint64 {
	if x != nil {
		return x.Index
	}
	return 0
}

type DataColumnsByRootIdentifier struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
@@ -176,7 +121,7 @@ type DataColumnsByRootIdentifier struct {
func (x *DataColumnsByRootIdentifier) Reset() {
	*x = DataColumnsByRootIdentifier{}
	if protoimpl.UnsafeEnabled {
		mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[2]
		mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
@@ -189,7 +134,7 @@ func (x *DataColumnsByRootIdentifier) String() string {
func (*DataColumnsByRootIdentifier) ProtoMessage() {}

func (x *DataColumnsByRootIdentifier) ProtoReflect() protoreflect.Message {
	mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[2]
	mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
@@ -202,7 +147,7 @@ func (x *DataColumnsByRootIdentifier) ProtoReflect() protoreflect.Message {

// Deprecated: Use DataColumnsByRootIdentifier.ProtoReflect.Descriptor instead.
func (*DataColumnsByRootIdentifier) Descriptor() ([]byte, []int) {
	return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{2}
	return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{1}
}

func (x *DataColumnsByRootIdentifier) GetBlockRoot() []byte {
@@ -253,29 +198,24 @@ var file_proto_prysm_v1alpha1_data_columns_proto_rawDesc = []byte{
	0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x06, 0x20,
	0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x34, 0x2c, 0x33, 0x32, 0x52, 0x1c, 0x6b,
	0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x49, 0x6e, 0x63,
	0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x53, 0x0a, 0x14, 0x44,
	0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
	0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f,
	0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52,
	0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e,
	0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78,
	0x22, 0x67, 0x0a, 0x1b, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42,
	0x79, 0x52, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12,
	0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20,
	0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f,
	0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38,
	0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72,
	0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c,
	0x75, 0x6d, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74,
	0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e,
	0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
	0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
	0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02,
	0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31,
	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
	0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x1b, 0x44,
	0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74,
	0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
	0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
	0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f,
	0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03,
	0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x07, 0x63, 0x6f, 0x6c,
	0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68,
	0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
	0x61, 0x31, 0x42, 0x10, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x50,
	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
	0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f,
	0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70,
	0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74,
	0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68,
	0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
	0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
	0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
@@ -290,15 +230,14 @@ func file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP() []byte {
	return file_proto_prysm_v1alpha1_data_columns_proto_rawDescData
}

var file_proto_prysm_v1alpha1_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_proto_prysm_v1alpha1_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_data_columns_proto_goTypes = []interface{}{
	(*DataColumnSidecar)(nil),           // 0: ethereum.eth.v1alpha1.DataColumnSidecar
	(*DataColumnIdentifier)(nil),        // 1: ethereum.eth.v1alpha1.DataColumnIdentifier
	(*DataColumnsByRootIdentifier)(nil), // 2: ethereum.eth.v1alpha1.DataColumnsByRootIdentifier
	(*SignedBeaconBlockHeader)(nil),     // 3: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
	(*DataColumnsByRootIdentifier)(nil), // 1: ethereum.eth.v1alpha1.DataColumnsByRootIdentifier
	(*SignedBeaconBlockHeader)(nil),     // 2: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
}
var file_proto_prysm_v1alpha1_data_columns_proto_depIdxs = []int32{
	3, // 0: ethereum.eth.v1alpha1.DataColumnSidecar.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
	2, // 0: ethereum.eth.v1alpha1.DataColumnSidecar.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
@@ -326,18 +265,6 @@ func file_proto_prysm_v1alpha1_data_columns_proto_init() {
		}
	}
	file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DataColumnIdentifier); i {
		case 0:
			return &v.state
		case 1:
			return &v.sizeCache
		case 2:
			return &v.unknownFields
		default:
			return nil
		}
	}
	file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DataColumnsByRootIdentifier); i {
		case 0:
			return &v.state
@@ -356,7 +283,7 @@ func file_proto_prysm_v1alpha1_data_columns_proto_init() {
		GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
		RawDescriptor: file_proto_prysm_v1alpha1_data_columns_proto_rawDesc,
		NumEnums:      0,
		NumMessages:   3,
		NumMessages:   2,
		NumExtensions: 0,
		NumServices:   0,
	},

Some files were not shown because too many files have changed in this diff.