Compare commits

...

10 Commits

Author SHA1 Message Date
terence tsao
0d4f8a2af4 Implement KZG proof batch verification for data column gossip validation 2025-08-20 10:44:07 -07:00
terence tsao
3d4f3cd973 Create gloas.proto 2025-08-18 06:58:42 -07:00
terence
6528fb9cea Update consensus spec to v1.6.0-alpha.4 and implement data column support (#15590)
* Update consensus spec to v1.6.0-alpha.4 and implement data column support for forkchoice spectests

* Apply suggestion from @prestonvanloon

Co-Authored-By: Preston Van Loon <pvanloon@offchainlabs.com>

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-08-16 15:49:12 +00:00
terence
5021131811 Fix NewSignedBeaconBlock calls to use Block field for equivocation handling (#15595) 2025-08-16 14:19:11 +00:00
kasey
26cec9d9c7 omit NetworkScheduleEntry fields that are not part of BlobScheduleEntry (#15557)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-08-14 22:45:47 +00:00
Justin Traglia
4ed90a02ef Rename various variables/functions to be more clear (#15529)
* Rename various variables/functions to be more clear

* Add changelog fragment

---------

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2025-08-14 11:06:22 +00:00
trinadh61
7d528c75bb adding user agent validator beacon client (#15574)
* adding user agent validator beacon client

* Update runtime/version/version.go

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>

* test cases

* contribution readme

* setting user agent to build version data

---------

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2025-08-13 21:36:17 +00:00
Preston Van Loon
e7b2953d5a Address out of bounds concern in beacon-chain/core/peerdas/das_core.go (#15586) 2025-08-13 15:07:26 +00:00
Muzry
acf35e849e Update endpoint to return 404 after isOptimistic check (#15559)
* Update endpoint to return 404 after isOptimistic check

* Fix error handling by using predefined errors

* fix: helpers build.bazel

* remove the StateIdDecodeError

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-13 14:40:20 +00:00
Preston Van Loon
c826d334a1 Add missing Fulu block type in stream handler (grpc StreamBlocksAltair) (#15583) 2025-08-12 22:26:26 +00:00
63 changed files with 750 additions and 84 deletions

View File

@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.6.0-alpha.1"
consensus_spec_version = "v1.6.0-alpha.4"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
"minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
"mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
"general": "sha256-MaN4zu3o0vWZypUHS5r4D8WzJF4wANoadM8qm6iyDs4=",
"minimal": "sha256-aZGNPp/bBvJgq3Wf6vyR0H6G3DOkbSuggEmOL4jEmtg=",
"mainnet": "sha256-C7jjosvpzUgw3GPajlsWBV02ZbkZ5Uv4ikmOqfDGajI=",
},
version = consensus_spec_version,
)
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
integrity = "sha256-qreawRS77l8CebiNww8z727qUItw7KlHY1Xqj7IrPdk=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -584,7 +584,7 @@ func (s *Service) runLateBlockTasks() {
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(expected) == 0 {
return nil, nil
@@ -592,7 +592,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
if len(expected) > maxBlobsPerBlock {
return nil, errMaxBlobsExceeded
}
indices := bs.Summary(root)
indices := store.Summary(root)
missing := make(map[uint64]bool, len(expected))
for i := range expected {
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
@@ -607,7 +607,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
// It returns a map where each key represents a missing DataColumnSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// DataColumns against the set of known missing sidecars.
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
@@ -619,7 +619,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
}
// Get a summary of the data columns stored in the database.
summary := bs.Summary(root)
summary := store.Summary(root)
// Check all expected data columns against the summary.
missing := make(map[uint64]bool)
@@ -717,7 +717,7 @@ func (s *Service) areDataColumnsAvailable(
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()
minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
@@ -820,7 +820,7 @@ func (s *Service) areDataColumnsAvailable(
missingIndices = uint64MapToSortedSlice(missingMap)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
}
}
}

View File

@@ -2889,7 +2889,7 @@ func TestIsDataAvailable(t *testing.T) {
})
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
@@ -2974,7 +2974,7 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
for i := range minimumColumnsCountToReconstruct - missingColumns {

View File

@@ -17,7 +17,7 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
// ReceiveDataColumn receives a single data column.
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
return errors.Wrap(err, "save data column sidecars")
return errors.Wrap(err, "save data column sidecar")
}
return nil

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -223,6 +223,14 @@ func dataColumnsSidecars(
cellsForRow := cellsAndProofs[rowIndex].Cells
proofsForRow := cellsAndProofs[rowIndex].Proofs
// Validate that we have enough cells and proofs for this column index
if columnIndex >= uint64(len(cellsForRow)) {
return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
}
if columnIndex >= uint64(len(proofsForRow)) {
return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
}
cell := cellsForRow[columnIndex]
column = append(column, cell)

View File

@@ -67,6 +67,55 @@ func TestDataColumnSidecars(t *testing.T) {
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
})
t.Run("cells array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with insufficient cells for the number of columns.
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, 10), // Only 10 cells
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
},
}
// This should fail because the function will try to access columns up to NumberOfColumns
// but we only have 10 cells/proofs.
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorContains(t, "column index", err)
require.ErrorContains(t, "exceeds cells length", err)
})
t.Run("proofs array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with sufficient cells but insufficient proofs.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
},
}
// This should fail when trying to access proof beyond index 4.
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorContains(t, "column index", err)
require.ErrorContains(t, "exceeds proofs length", err)
})
}
func TestComputeCustodyGroupForColumn(t *testing.T) {

View File

@@ -18,8 +18,8 @@ var (
ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
)
// MinimumColumnsCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnsCountToReconstruct() uint64 {
// MinimumColumnCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
return (params.BeaconConfig().NumberOfColumns + 1) / 2
@@ -58,7 +58,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
// Check if there is enough sidecars to reconstruct the missing columns.
sidecarCount := len(sidecarByIndex)
if uint64(sidecarCount) < MinimumColumnsCountToReconstruct() {
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}
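
A minimal, self-contained sketch (illustrative, not part of this diff) of the ceiling-half rule the renamed helper implements: under integer division, (n + 1) / 2 yields n/2 for even n and n/2 + 1 for odd n, so with the current 128-column layout reconstruction becomes possible once 64 column sidecars are stored.

package main

import "fmt"

// minimumColumnCount mirrors the formula in MinimumColumnCountToReconstruct:
// (n + 1) / 2 is the ceiling of n/2 under integer division.
func minimumColumnCount(numberOfColumns uint64) uint64 {
	return (numberOfColumns + 1) / 2
}

func main() {
	fmt.Println(minimumColumnCount(128)) // 64: for an even count, exactly half suffices
	fmt.Println(minimumColumnCount(7))   // 4: for an odd count, half rounded up
}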

View File

@@ -48,7 +48,7 @@ func TestMinimumColumnsCountToReconstruct(t *testing.T) {
params.OverrideBeaconConfig(cfg)
// Compute the minimum number of columns needed to reconstruct.
actual := peerdas.MinimumColumnsCountToReconstruct()
actual := peerdas.MinimumColumnCountToReconstruct()
require.Equal(t, tc.expected, actual)
})
}
@@ -100,7 +100,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
t.Run("not enough columns to enable reconstruction", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
minimum := peerdas.MinimumColumnsCountToReconstruct()
minimum := peerdas.MinimumColumnCountToReconstruct()
_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars[:minimum-1])
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})

View File

@@ -822,6 +822,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
regularsync.WithKzgBatchVerifierLimit(b.cliCtx.Int(flags.KzgBatchVerifierLimit.Name)),
)
return b.services.RegisterService(rs)
}

View File

@@ -305,15 +305,15 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return nil
}
// BroadcastDataColumn broadcasts a data column to the p2p network, the message is assumed to be
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
func (s *Service) BroadcastDataColumn(
func (s *Service) BroadcastDataColumnSidecar(
root [fieldparams.RootLength]byte,
dataColumnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
defer span.End()
// Ensure the data column sidecar is not nil.
@@ -330,12 +330,12 @@ func (s *Service) BroadcastDataColumn(
}
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
return nil
}
func (s *Service) internalBroadcastDataColumn(
func (s *Service) internalBroadcastDataColumnSidecar(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
@@ -343,7 +343,7 @@ func (s *Service) internalBroadcastDataColumn(
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
defer span.End()
// Increase the number of broadcast attempts.

View File

@@ -716,7 +716,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
// Attempt to broadcast nil object should fail.
var emptyRoot [fieldparams.RootLength]byte
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
require.ErrorContains(t, "attempted to broadcast nil", err)
// Subscribe to the topic.
@@ -727,7 +727,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
time.Sleep(50 * time.Millisecond)
// Broadcast to peers and wait.
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
require.NoError(t, err)
// Receive the message.

View File

@@ -51,7 +51,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.

View File

@@ -167,8 +167,8 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
return nil
}
// BroadcastDataColumn -- fake.
func (*FakeP2P) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
return nil
}

View File

@@ -62,8 +62,8 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
// BroadcastDataColumn broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
m.BroadcastCalled.Store(true)
return nil
}

View File

@@ -228,8 +228,8 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
return nil
}
// BroadcastDataColumn broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
p.BroadcastCalled.Store(true)
return nil
}

View File

@@ -72,6 +72,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"handlers_equivocation_test.go",
"handlers_pool_test.go",
"handlers_state_test.go",
"handlers_test.go",

View File

@@ -701,7 +701,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
// Validate and optionally broadcast sidecars on equivocation.
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(genericBlock)
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
@@ -855,7 +855,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
// Validate and optionally broadcast sidecars on equivocation.
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(genericBlock)
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
@@ -1220,7 +1220,7 @@ func (s *Server) GetStateFork(w http.ResponseWriter, r *http.Request) {
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status"+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1331,7 +1331,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
@@ -1512,7 +1512,7 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
}
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1686,7 +1686,7 @@ func (s *Server) GetPendingConsolidations(w http.ResponseWriter, r *http.Request
} else {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1742,7 +1742,7 @@ func (s *Server) GetPendingDeposits(w http.ResponseWriter, r *http.Request) {
} else {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1798,7 +1798,7 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Req
} else {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1851,7 +1851,7 @@ func (s *Server) GetProposerLookahead(w http.ResponseWriter, r *http.Request) {
} else {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

View File

@@ -0,0 +1,35 @@
package beacon
import (
"encoding/json"
"testing"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
rpctesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared/testing"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
// TestBlocks_NewSignedBeaconBlock_EquivocationFix tests that blocks.NewSignedBeaconBlock
// correctly handles the fixed case where genericBlock.Block is passed instead of genericBlock
func TestBlocks_NewSignedBeaconBlock_EquivocationFix(t *testing.T) {
// Parse the Phase0 JSON block
var block structs.SignedBeaconBlock
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
require.NoError(t, err)
// Convert to generic format
genericBlock, err := block.ToGeneric()
require.NoError(t, err)
// Test the FIX: pass genericBlock.Block instead of genericBlock
// This is what our fix changed in handlers.go lines 704 and 858
_, err = blocks.NewSignedBeaconBlock(genericBlock.Block)
require.NoError(t, err, "NewSignedBeaconBlock should work with genericBlock.Block")
// Test the BROKEN version: pass genericBlock directly (this should fail)
_, err = blocks.NewSignedBeaconBlock(genericBlock)
if err == nil {
t.Errorf("NewSignedBeaconBlock should fail with whole genericBlock but succeeded")
}
}

View File

@@ -56,7 +56,7 @@ func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
}
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -125,7 +125,7 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
@@ -227,7 +227,7 @@ func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}

View File

@@ -44,7 +44,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -222,7 +222,7 @@ func (s *Server) GetValidator(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -258,7 +258,7 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -419,7 +419,7 @@ func (s *Server) getValidatorIdentitiesJSON(
) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

View File

@@ -46,7 +46,7 @@ func (s *Server) getBeaconStateV2(ctx context.Context, w http.ResponseWriter, id
isOptimistic, err := helpers.IsOptimistic(ctx, id, s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
httputil.HandleError(w, "Could not check if state is optimistic: "+err.Error(), http.StatusInternalServerError)
helpers.HandleIsOptimisticError(w, err)
return
}
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

View File

@@ -12,6 +12,7 @@ go_library(
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -21,6 +22,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/httputil:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -40,6 +42,7 @@ go_test(
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -57,5 +60,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,11 +2,14 @@ package helpers
import (
"errors"
"net/http"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -28,6 +31,22 @@ func PrepareStateFetchGRPCError(err error) error {
return status.Errorf(codes.Internal, "Invalid state ID: %v", err)
}
// HandleIsOptimisticError handles errors from IsOptimistic function calls and writes appropriate HTTP responses.
func HandleIsOptimisticError(w http.ResponseWriter, err error) {
var fetchErr *lookup.FetchStateError
if errors.As(err, &fetchErr) {
shared.WriteStateFetchError(w, err)
return
}
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
if errors.As(err, &blockRootsNotFoundErr) {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
return
}
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
}
// IndexedVerificationFailure represents a collection of verification failures.
type IndexedVerificationFailure struct {
Failures []*SingleIndexedVerificationFailure `json:"failures"`
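
A minimal sketch, assuming the same Server fields used by the endpoints in this change (the handler name is hypothetical), of the call-site pattern the new helper enables: instead of hard-coding a 500, handlers delegate status-code selection to HandleIsOptimisticError, which maps the known lookup error types to 404 and everything else to 500.

func (s *Server) getExample(w http.ResponseWriter, r *http.Request) {
	isOptimistic, err := helpers.IsOptimistic(r.Context(), []byte("head"),
		s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		// 404 for FetchStateError and BlockRootsNotFoundError, 500 otherwise.
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	_ = isOptimistic // used to populate the response, as in the endpoints above
}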

View File

@@ -56,7 +56,8 @@ func IsOptimistic(
if bytesutil.IsHex(stateId) {
id, err := hexutil.Decode(stateIdString)
if err != nil {
return false, err
e := lookup.NewStateIdParseError(err)
return false, &e
}
return isStateRootOptimistic(ctx, id, optimisticModeFetcher, stateFetcher, chainInfo, database)
} else if len(stateId) == 32 {
@@ -127,7 +128,7 @@ func isStateRootOptimistic(
) (bool, error) {
st, err := stateFetcher.State(ctx, stateId)
if err != nil {
return true, errors.Wrap(err, "could not fetch state")
return true, lookup.NewFetchStateError(err)
}
if st.Slot() == chainInfo.HeadSlot() {
return optimisticModeFetcher.IsOptimistic(ctx)
@@ -137,7 +138,7 @@ func isStateRootOptimistic(
return true, errors.Wrapf(err, "could not get block roots for slot %d", st.Slot())
}
if !has {
return true, errors.New("no block roots returned from the database")
return true, lookup.NewBlockRootsNotFoundError()
}
for _, r := range roots {
b, err := database.Block(ctx, r)

View File

@@ -1,12 +1,15 @@
package helpers
import (
"net/http"
"net/http/httptest"
"strconv"
"testing"
chainmock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
@@ -21,6 +24,7 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
)
func TestIsOptimistic(t *testing.T) {
@@ -226,7 +230,67 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("State not found", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, b))
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
mf := &testutil.MockStater{
CustomError: lookup.NewFetchStateError(nil),
}
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
var fetchErr *lookup.FetchStateError
require.Equal(t, true, errors.As(err, &fetchErr))
})
t.Run("stateId invalid", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, b))
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
mf := &testutil.MockStater{
CustomError: lookup.NewFetchStateError(nil),
}
_, err = IsOptimistic(ctx, []byte("0xabc"), cs, mf, cs, db)
var fetchErr *lookup.FetchStateError
require.Equal(t, false, errors.As(err, &fetchErr))
})
t.Run("block roots not found", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, b))
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
st, err := util.NewBeaconState()
require.NoError(t, st.SetSlot(primitives.Slot(fieldparams.SlotsPerEpoch+1)))
require.NoError(t, err)
mf := &testutil.MockStater{BeaconState: st}
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
require.Equal(t, true, errors.As(err, &blockRootsNotFoundErr))
})
})
t.Run("slot", func(t *testing.T) {
t.Run("head is not optimistic", func(t *testing.T) {
cs := &chainmock.ChainService{Optimistic: false}
@@ -319,6 +383,36 @@ func TestIsOptimistic(t *testing.T) {
})
}
func TestHandleIsOptimisticError(t *testing.T) {
t.Run("fetch-state error handled as 404", func(t *testing.T) {
rr := httptest.NewRecorder()
notFoundErr := lookup.StateNotFoundError{}
fetchErr := lookup.NewFetchStateError(&notFoundErr)
HandleIsOptimisticError(rr, fetchErr)
require.Equal(t, http.StatusNotFound, rr.Code)
require.StringContains(t, notFoundErr.Error(), rr.Body.String())
})
t.Run("no block roots error handled as 404", func(t *testing.T) {
rr := httptest.NewRecorder()
blockRootsErr := lookup.NewBlockRootsNotFoundError()
HandleIsOptimisticError(rr, blockRootsErr)
require.Equal(t, http.StatusNotFound, rr.Code)
require.StringContains(t, blockRootsErr.Error(), rr.Body.String())
})
t.Run("generic error handled as 500", func(t *testing.T) {
rr := httptest.NewRecorder()
genericErr := errors.New("boom")
HandleIsOptimisticError(rr, genericErr)
require.Equal(t, http.StatusInternalServerError, rr.Code)
require.StringContains(t, "Could not check optimistic status: boom", rr.Body.String())
})
}
// prepareForkchoiceState prepares a beacon state with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(

View File

@@ -23,6 +23,20 @@ import (
"github.com/pkg/errors"
)
type BlockRootsNotFoundError struct {
message string
}
func NewBlockRootsNotFoundError() *BlockRootsNotFoundError {
return &BlockRootsNotFoundError{
message: "no block roots returned from the database",
}
}
func (e BlockRootsNotFoundError) Error() string {
return e.message
}
// BlockIdParseError represents an error scenario where a block ID could not be parsed.
type BlockIdParseError struct {
message string
@@ -341,7 +355,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indic
stored := summary.Stored()
count := uint64(len(stored))
if count < peerdas.MinimumColumnsCountToReconstruct() {
if count < peerdas.MinimumColumnCountToReconstruct() {
// There is no way to reconstruct the data columns.
return nil, &core.RpcError{
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if it is already the case", flags.SubscribeAllDataSubnets.Name),

View File

@@ -443,7 +443,7 @@ func TestGetBlob(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnsCountToReconstruct()+1])
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
require.NoError(t, err)
blocker := &BeaconDbBlocker{

View File

@@ -21,6 +21,27 @@ import (
"github.com/pkg/errors"
)
type FetchStateError struct {
message string
cause error
}
func NewFetchStateError(cause error) *FetchStateError {
return &FetchStateError{
message: "could not fetch state",
cause: cause,
}
}
func (e *FetchStateError) Error() string {
if e.cause != nil {
return e.message + ": " + e.cause.Error()
}
return e.message
}
func (e *FetchStateError) Unwrap() error { return e.cause }
// StateIdParseError represents an error scenario where a state ID could not be parsed.
type StateIdParseError struct {
message string
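
Because FetchStateError implements Unwrap, both errors.As and errors.Is work through wrapped chains. A small, self-contained sketch (the local type mirrors the shape of lookup.FetchStateError for illustration):

package main

import (
	"errors"
	"fmt"
)

// fetchStateError stands in for lookup.FetchStateError.
type fetchStateError struct {
	cause error
}

func (e *fetchStateError) Error() string { return "could not fetch state: " + e.cause.Error() }
func (e *fetchStateError) Unwrap() error { return e.cause }

func main() {
	cause := errors.New("state not found")
	var err error = fmt.Errorf("handler: %w", &fetchStateError{cause: cause})

	var fe *fetchStateError
	fmt.Println(errors.As(err, &fe))   // true: found through the wrap chain
	fmt.Println(errors.Is(err, cause)) // true: Unwrap exposes the underlying cause
}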

View File

@@ -56,11 +56,7 @@ func (s *Server) GetValidatorCount(w http.ResponseWriter, r *http.Request) {
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateID), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
if err != nil {
errJson := &httputil.DefaultJsonError{
Message: fmt.Sprintf("could not check if slot's block is optimistic: %v", err),
Code: http.StatusInternalServerError,
}
httputil.WriteError(w, errJson)
helpers.HandleIsOptimisticError(w, err)
return
}

View File

@@ -248,6 +248,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
b.Block = &ethpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
case *ethpb.SignedBeaconBlockElectra:
b.Block = &ethpb.StreamBlocksResponse_ElectraBlock{ElectraBlock: p}
case *ethpb.SignedBeaconBlockFulu:
b.Block = &ethpb.StreamBlocksResponse_FuluBlock{FuluBlock: p}
default:
log.Errorf("Unknown block type %T", p)
}

View File

@@ -381,3 +381,92 @@ func TestServer_StreamSlotsVerified_OnHeadUpdated(t *testing.T) {
}
<-exitRoutine
}
func TestServer_StreamBlocksVerified_FuluBlock(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := t.Context()
beaconState, privs := util.DeterministicGenesisStateFulu(t, 32)
c, err := altair.NextSyncCommittee(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
b, err := util.GenerateFullBlockFulu(beaconState, privs, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wrappedBlk := util.SaveBlock(t, ctx, db, b)
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_FuluBlock{FuluBlock: b}}).Do(func(arg0 interface{}) {
exitRoutine <- true
})
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
go func(tt *testing.T) {
err := server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{VerifiedOnly: true}, mockStream)
if s, _ := status.FromError(err); s.Code() != codes.Canceled {
assert.NoError(tt, err)
}
}(t)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{Slot: b.Block.Slot, BlockRoot: r, SignedBlock: wrappedBlk},
})
}
<-exitRoutine
}
func TestServer_StreamBlocks_FuluBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.BeaconConfig())
ctx := t.Context()
beaconState, privs := util.DeterministicGenesisStateFulu(t, 64)
c, err := altair.NextSyncCommittee(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
b, err := util.GenerateFullBlockFulu(beaconState, privs, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
BlockNotifier: chainService.BlockNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_FuluBlock{FuluBlock: b}}).Do(func(arg0 interface{}) {
exitRoutine <- true
})
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
go func(tt *testing.T) {
err := server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{}, mockStream)
if s, _ := status.FromError(err); s.Code() != codes.Canceled {
assert.NoError(tt, err)
}
}(t)
wrappedBlk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the block feed).
for sent := 0; sent == 0; {
sent = server.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: wrappedBlk},
})
}
<-exitRoutine
}

View File

@@ -15,10 +15,14 @@ type MockStater struct {
BeaconStateRoot []byte
StatesBySlot map[primitives.Slot]state.BeaconState
StatesByRoot map[[32]byte]state.BeaconState
CustomError error
}
// State --
func (m *MockStater) State(ctx context.Context, id []byte) (state.BeaconState, error) {
if m.CustomError != nil {
return nil, m.CustomError
}
if m.StateProviderFunc != nil {
return m.StateProviderFunc(ctx, id)
}

View File

@@ -4,6 +4,8 @@ import (
"context"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
@@ -11,13 +13,21 @@ import (
"github.com/pkg/errors"
)
const signatureVerificationInterval = 50 * time.Millisecond
const (
signatureVerificationInterval = 50 * time.Millisecond
kzgVerificationInterval = 250 * time.Millisecond
)
type signatureVerifier struct {
set *bls.SignatureBatch
resChan chan error
}
type kzgVerifier struct {
dataColumns []blocks.RODataColumn
resChan chan error
}
// A routine that runs in the background to perform batch
// verifications of incoming messages from gossip.
func (s *Service) verifierRoutine() {
@@ -47,6 +57,34 @@ func (s *Service) verifierRoutine() {
}
}
// A routine that runs in the background to perform batch
// KZG verifications of data column sidecars from gossip.
func (s *Service) kzgVerifierRoutine() {
kzgBatch := make([]*kzgVerifier, 0)
ticker := time.NewTicker(kzgVerificationInterval)
for {
select {
case <-s.ctx.Done():
ticker.Stop()
for i := 0; i < len(kzgBatch); i++ {
kzgBatch[i].resChan <- s.ctx.Err()
}
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
if len(kzgBatch) >= s.cfg.kzgBatchVerifierLimit {
verifyKzgBatch(kzgBatch)
kzgBatch = []*kzgVerifier{}
}
case <-ticker.C:
if len(kzgBatch) > 0 {
verifyKzgBatch(kzgBatch)
kzgBatch = []*kzgVerifier{}
}
}
}
}
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
defer span.End()
@@ -120,3 +158,47 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
}
return aggSet, nil
}
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, message string, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
defer span.End()
resChan := make(chan error)
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
s.kzgChan <- verificationSet
resErr := <-resChan
close(resChan)
if resErr != nil {
log.WithError(resErr).Tracef("Could not perform batch verification of %s", message)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(dataColumns)
if err != nil {
verErr := errors.Wrapf(err, "Could not verify %s", message)
tracing.AnnotateError(span, verErr)
return pubsub.ValidationReject, verErr
}
}
return pubsub.ValidationAccept, nil
}
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
if len(kzgBatch) == 0 {
return
}
allDataColumns := make([]blocks.RODataColumn, 0)
for _, kzgVerifier := range kzgBatch {
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
}
var verificationErr error
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
if err != nil {
verificationErr = errors.Wrap(err, "batch KZG verification failed")
}
// Send the same result to all verifiers in the batch
for i := 0; i < len(kzgBatch); i++ {
kzgBatch[i].resChan <- verificationErr
}
}
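
The routine above follows a common fan-in batching pattern: requests queue on a channel, a flush happens when either the batch limit is hit or the ticker fires, and every waiter receives the shared result. A self-contained sketch of the pattern with illustrative names (the real code verifies KZG proofs and, on batch failure, each caller re-verifies its own columns individually):

package main

import (
	"fmt"
	"time"
)

type request struct {
	payload int
	res     chan error
}

// batcher collects requests until limit is reached or interval elapses,
// then runs one shared verification and fans the result back out.
func batcher(in <-chan request, limit int, interval time.Duration, verify func([]int) error) {
	batch := make([]request, 0, limit)
	flush := func() {
		if len(batch) == 0 {
			return
		}
		items := make([]int, len(batch))
		for i, r := range batch {
			items[i] = r.payload
		}
		err := verify(items) // one verification amortized over the whole batch
		for _, r := range batch {
			r.res <- err
		}
		batch = batch[:0]
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case r := <-in:
			batch = append(batch, r)
			if len(batch) >= limit {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	in := make(chan request)
	go batcher(in, 2, 50*time.Millisecond, func(items []int) error {
		fmt.Println("verifying batch:", items)
		return nil
	})
	res := make(chan error)
	in <- request{payload: 1, res: res}
	in <- request{payload: 2, res: res}
	fmt.Println(<-res, <-res) // both waiters receive the shared result
}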

View File

@@ -44,7 +44,7 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
numberOfColumns := params.BeaconConfig().NumberOfColumns
// If reconstruction is not possible or if all columns are already stored, exit early.
if storedColumnsCount < peerdas.MinimumColumnsCountToReconstruct() || storedColumnsCount == numberOfColumns {
if storedColumnsCount < peerdas.MinimumColumnCountToReconstruct() || storedColumnsCount == numberOfColumns {
return nil
}
@@ -198,7 +198,7 @@ func (s *Service) broadcastMissingDataColumnSidecars(
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)
// Broadcast the missing data column.
if err := s.cfg.p2p.BroadcastDataColumn(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
if err := s.cfg.p2p.BroadcastDataColumnSidecar(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
log.WithError(err).Error("Broadcast data column")
}

View File

@@ -30,7 +30,7 @@ func TestReconstructDataColumns(t *testing.T) {
root, block := roBlock.Root(), roBlock.Block()
slot, proposerIndex := block.Slot(), block.ProposerIndex()
minimumCount := peerdas.MinimumColumnsCountToReconstruct()
minimumCount := peerdas.MinimumColumnCountToReconstruct()
t.Run("not enough stored sidecars", func(t *testing.T) {
storage := filesystem.NewEphemeralDataColumnStorage(t)
@@ -61,7 +61,7 @@ func TestReconstructDataColumns(t *testing.T) {
const cgc = 8
storage := filesystem.NewEphemeralDataColumnStorage(t)
minimumCount := peerdas.MinimumColumnsCountToReconstruct()
minimumCount := peerdas.MinimumColumnCountToReconstruct()
err := storage.Save(verifiedRoDataColumns[:minimumCount])
require.NoError(t, err)

View File

@@ -229,3 +229,11 @@ func WithBatchVerifierLimit(limit int) Option {
return nil
}
}
// WithKzgBatchVerifierLimit sets the maximum number of KZG proofs to batch verify at once.
func WithKzgBatchVerifierLimit(limit int) Option {
return func(s *Service) error {
s.cfg.kzgBatchVerifierLimit = limit
return nil
}
}
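
For context, these helpers follow Go's functional-options pattern: each Option mutates the service configuration and may fail, and the constructor applies them in order. A minimal, self-contained sketch with illustrative names:

package main

import "fmt"

type config struct{ kzgBatchVerifierLimit int }

type service struct{ cfg *config }

// option is a configuration function applied by the constructor.
type option func(*service) error

func withKzgBatchVerifierLimit(limit int) option {
	return func(s *service) error {
		s.cfg.kzgBatchVerifierLimit = limit
		return nil
	}
}

func newService(opts ...option) (*service, error) {
	s := &service{cfg: &config{}}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := newService(withKzgBatchVerifierLimit(25))
	fmt.Println(s.cfg.kzgBatchVerifierLimit) // 25
}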

View File

@@ -106,6 +106,7 @@ type config struct {
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
batchVerifierLimit int
kzgBatchVerifierLimit int
}
// This defines the interface for interacting with block chain service
@@ -164,6 +165,7 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
kzgChan chan *kzgVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
@@ -202,6 +204,8 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
// Initialize signature channel with configured limit
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
// Initialize KZG channel with configured limit
r.kzgChan = make(chan *kzgVerifier, r.cfg.kzgBatchVerifierLimit)
// Correctly remove it from our seen pending block map.
// The eviction method always assumes that the mutex is held.
r.slotToPendingBlocks.OnEvicted(func(s string, i interface{}) {
@@ -254,6 +258,7 @@ func (s *Service) Start() {
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
go s.verifierRoutine()
go s.kzgVerifierRoutine()
go s.startTasksPostInitialSync()
go s.processDataColumnLogs()

View File

@@ -151,7 +151,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
sidecar := sidecars[columnIndex]
if err := s.cfg.p2p.BroadcastDataColumn(blockRoot, sidecar.Index, sidecar.DataColumnSidecar); err != nil {
if err := s.cfg.p2p.BroadcastDataColumnSidecar(blockRoot, sidecar.Index, sidecar.DataColumnSidecar); err != nil {
log.WithError(err).Error("Failed to broadcast data column")
}

View File

@@ -18,7 +18,7 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column")
return errors.Wrap(err, "receive data column sidecar")
}
slot := sidecar.Slot()
@@ -26,7 +26,7 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
root := sidecar.BlockRoot()
if err := s.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root); err != nil {
return errors.Wrap(err, "reconstruct data columns")
return errors.Wrap(err, "reconstruct/save/broadcast data column sidecars")
}
return nil

View File

@@ -145,9 +145,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
}
// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
if err := verifier.SidecarKzgProofVerified(); err != nil {
return pubsub.ValidationReject, err
validationResult, err := s.validateWithKzgBatchVerifier(ctx, "data column KZG proof", roDataColumns)
if validationResult != pubsub.ValidationAccept {
return validationResult, err
}
// Mark KZG verification as satisfied since we did it via batch verifier
verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
// with valid header signature, sidecar inclusion proof, and kzg proof.

View File

@@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
@@ -25,6 +26,9 @@ import (
)
func TestValidateDataColumn(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
ctx := t.Context()
t.Run("from self", func(t *testing.T) {
@@ -63,10 +67,14 @@ func TestValidateDataColumn(t *testing.T) {
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
service := &Service{
cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: clock, chain: chainService},
cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: clock, chain: chainService, batchVerifierLimit: 10, kzgBatchVerifierLimit: 10},
ctx: ctx,
newColumnsVerifier: newDataColumnsVerifier,
seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
kzgChan: make(chan *kzgVerifier, 10),
}
// Start the KZG verifier routine for batch verification
go service.kzgVerifierRoutine()
// Encode a `beaconBlock` message instead of expected.
buf := new(bytes.Buffer)

View File

@@ -47,6 +47,10 @@ var (
RequireSidecarKzgProofVerified,
}
// SpectestDataColumnSidecarRequirements is used by the forkchoice spectests when verifying data columns used in the on_block tests.
SpectestDataColumnSidecarRequirements = requirementList(GossipDataColumnSidecarRequirements).excluding(
RequireSidecarParentSeen, RequireSidecarParentValid)
errColumnsInvalid = errors.New("data columns failed verification")
errBadTopicLength = errors.New("topic length is invalid")
errBadTopic = errors.New("topic is not of the one expected")

View File

@@ -0,0 +1,3 @@
### Changed
- Renamed various variables/functions to be more clear.

View File

@@ -0,0 +1,2 @@
### Ignored
- omits non-standard blob schedule entry struct fields from marshaling.

View File

@@ -0,0 +1,2 @@
### Fixed
- Fixed endpoint response to return 404 or 400 after isOptimistic check

View File

@@ -0,0 +1,3 @@
### Added
- Added fulu block support to StreamBlocksAltair

View File

@@ -0,0 +1,3 @@
### Fixed
- Safeguard against accidental out of bounds array access in dataColumnSidecars method.

View File

@@ -0,0 +1,3 @@
### Fixed
- Fixed NewSignedBeaconBlock calls to use Block field for proper equivocation handling

View File

@@ -0,0 +1,3 @@
### Added
- KZG proof batch verification for data column gossip validation

View File

@@ -0,0 +1,3 @@
### Changed
- Update consensus spec to v1.6.0-alpha.4 and implement data column support for forkchoice spectests

View File

@@ -0,0 +1,4 @@
### Added
- All outbound HTTP requests from the validator client now include a custom `User-Agent` header in the format `Prysm/<name>/<version>`. This enhances observability and enables upstream systems to correctly identify Prysm validator clients by their name and version.
- Fixes [#15435](https://github.com/OffchainLabs/prysm/issues/15435).
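
A minimal, self-contained sketch of one common way to achieve the described behavior: wrapping http.RoundTripper so every outbound request carries the header. The actual validator client derives the name and version from its build data; the values below are placeholders.

package main

import (
	"fmt"
	"net/http"
)

// userAgentTransport stamps a fixed User-Agent onto every request.
type userAgentTransport struct {
	agent string
	base  http.RoundTripper
}

func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	req = req.Clone(req.Context()) // avoid mutating the caller's request
	req.Header.Set("User-Agent", t.agent)
	return t.base.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: &userAgentTransport{
		agent: fmt.Sprintf("Prysm/%s/%s", "validator", "v6.0.0"), // placeholder name/version
		base:  http.DefaultTransport,
	}}
	_ = client // every request sent with this client carries the header
}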

View File

@@ -344,4 +344,10 @@ var (
Usage: "Maximum number of signatures to batch verify at once for beacon attestation p2p gossip.",
Value: 1000,
}
// KzgBatchVerifierLimit sets the maximum number of KZG proofs to batch verify at once.
KzgBatchVerifierLimit = &cli.IntFlag{
Name: "kzg-batch-verifier-limit",
Usage: "Maximum number of KZG proofs to batch verify at once for data column p2p gossip.",
Value: 25,
}
)

View File

@@ -148,6 +148,7 @@ var appFlags = []cli.Flag{
bflags.BackfillWorkerCount,
bflags.BackfillOldestSlot,
flags.BatchVerifierLimit,
flags.KzgBatchVerifierLimit,
}
func init() {

View File

@@ -73,6 +73,7 @@ var appHelpFlagGroups = []flagGroup{
flags.RPCHost,
flags.RPCPort,
flags.BatchVerifierLimit,
flags.KzgBatchVerifierLimit,
},
},
{

View File

@@ -348,13 +348,13 @@ func (b *BeaconChainConfig) ExecutionRequestLimits() enginev1.ExecutionRequestLi
}
type NetworkScheduleEntry struct {
ForkVersion [fieldparams.VersionLength]byte
ForkDigest [4]byte
MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK"`
Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
BPOEpoch primitives.Epoch
VersionEnum int
isFork bool
ForkVersion [fieldparams.VersionLength]byte `yaml:"-" json:"-"`
ForkDigest [4]byte `yaml:"-" json:"-"`
MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK"`
Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
BPOEpoch primitives.Epoch `yaml:"-" json:"-"`
VersionEnum int `yaml:"-" json:"-"`
isFork bool `yaml:"-" json:"-"`
}
func (e NetworkScheduleEntry) LogFields() log.Fields {
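
A small, self-contained sketch of what the added `yaml:"-"`/`json:"-"` tags accomplish: marshalers skip the internal fields, so only the standard-named entries appear in output (field names mirror the struct above; values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type entry struct {
	ForkVersion      [4]byte `json:"-"` // internal, omitted from output
	MaxBlobsPerBlock uint64  `json:"MAX_BLOBS_PER_BLOCK"`
	Epoch            uint64  `json:"EPOCH"`
}

func main() {
	out, _ := json.Marshal(entry{MaxBlobsPerBlock: 6, Epoch: 269568})
	fmt.Println(string(out)) // {"MAX_BLOBS_PER_BLOCK":6,"EPOCH":269568}
}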

View File

@@ -37,11 +37,12 @@ var placeholderFields = []string{
"EIP7805_FORK_EPOCH",
"EIP7805_FORK_VERSION",
"EPOCHS_PER_SHUFFLING_PHASE",
"INCLUSION_LIST_SUBMISSION_DEADLINE",
"MAX_BYTES_PER_INCLUSION_LIST",
"MAX_REQUEST_BLOB_SIDECARS_FULU",
"MAX_REQUEST_INCLUSION_LIST",
"MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests
"PROPOSER_INCLUSION_LIST_CUT_OFF",
"PROPOSER_INCLUSION_LIST_CUTOFF",
"PROPOSER_SCORE_BOOST_EIP7732",
"PROPOSER_SELECTION_GAP",
"TARGET_NUMBER_OF_PEERS",

View File

@@ -0,0 +1,26 @@
syntax = "proto3";
package ethereum.eth.v1alpha1;
import "proto/eth/ext/options.proto";
message ExecutionPayloadHeaderGloas {
bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
uint64 gas_limit = 4;
uint64 builder_index = 5 [ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v6/"
"consensus-types/primitives.ValidatorIndex" ];
uint64 slot = 6 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"
];
uint64 value = 7;
bytes blob_kzg_commitments_root = 8 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
message SignedExecutionPayloadHeader {
ExecutionPayloadHeaderGloas message = 1;
bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
}

View File

@@ -17,6 +17,7 @@ go_library(
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",

View File

@@ -10,6 +10,7 @@ import (
"strings"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
@@ -62,6 +63,7 @@ func runTest(t *testing.T, config string, fork int, basePath string) { // nolint
for _, folder := range testFolders {
t.Run(folder.Name(), func(t *testing.T) {
helpers.ClearCache()
preStepsFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "steps.yaml")
require.NoError(t, err)
var steps []Step
@@ -148,6 +150,9 @@ func runTest(t *testing.T, config string, fork int, basePath string) { // nolint
}
}
runBlobStep(t, step, beaconBlock, fork, folder, testsFolderPath, builder)
if len(step.DataColumns) > 0 {
runDataColumnStep(t, step, beaconBlock, fork, folder, testsFolderPath, builder)
}
if beaconBlock != nil {
if step.Valid != nil && !*step.Valid {
builder.InvalidBlock(t, beaconBlock)
@@ -293,10 +298,154 @@ func runBlobStep(t *testing.T,
}
}
func runDataColumnStep(t *testing.T,
step Step,
beaconBlock interfaces.ReadOnlySignedBeaconBlock,
fork int,
folder os.DirEntry,
testsFolderPath string,
builder *Builder,
) {
columnFiles := step.DataColumns
require.NotNil(t, beaconBlock)
require.Equal(t, true, fork >= version.Fulu)
block := beaconBlock.Block()
root, err := block.HashTreeRoot()
require.NoError(t, err)
kzgs, err := block.Body().BlobKzgCommitments()
require.NoError(t, err)
sh, err := beaconBlock.Header()
require.NoError(t, err)
// Sentinel carrying the same message the data column verifier reports. The verifier
// wraps its own error value, so errAssertionForStep matches this one by message, not identity.
errDataColumnsInvalid := errors.New("data columns failed verification")
requireVerifyExpected := errAssertionForStep(step, errDataColumnsInvalid)
var allColumns []blocks.RODataColumn
for columnIndex, columnFile := range columnFiles {
if columnFile == nil || *columnFile == "null" {
continue
}
dataColumnFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), fmt.Sprint(*columnFile, ".ssz_snappy"))
require.NoError(t, err)
dataColumnSSZ, err := snappy.Decode(nil /* dst */, dataColumnFile)
require.NoError(t, err)
var pb *ethpb.DataColumnSidecar
if step.Valid != nil && !*step.Valid {
pb = &ethpb.DataColumnSidecar{}
if err := pb.UnmarshalSSZ(dataColumnSSZ); err != nil {
pb = &ethpb.DataColumnSidecar{
Index: uint64(columnIndex),
Column: [][]byte{},
KzgCommitments: kzgs,
KzgProofs: make([][]byte, 0),
SignedBlockHeader: sh,
}
}
} else {
numCells := len(kzgs)
column := make([][]byte, numCells)
for cellIndex := 0; cellIndex < numCells; cellIndex++ {
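// 2048 = the PeerDAS cell size in bytes (64 field elements of 32 bytes each);
// cells the fixture does not cover are left zero-filled.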
cell := make([]byte, 2048)
cellStart := cellIndex * 2048
cellEnd := cellStart + 2048
if cellEnd <= len(dataColumnSSZ) {
copy(cell, dataColumnSSZ[cellStart:cellEnd])
}
column[cellIndex] = cell
}
inclusionProof, err := blocks.MerkleProofKZGCommitments(block.Body())
require.NoError(t, err)
pb = &ethpb.DataColumnSidecar{
Index: uint64(columnIndex),
Column: column,
KzgCommitments: kzgs,
SignedBlockHeader: sh,
KzgCommitmentsInclusionProof: inclusionProof,
}
}
ro, err := blocks.NewRODataColumnWithRoot(pb, root)
require.NoError(t, err)
allColumns = append(allColumns, ro)
}
if len(allColumns) > 0 {
ini, err := builder.vwait.WaitForInitializer(context.Background())
require.NoError(t, err)
// Use different verification requirements based on whether this is a valid or invalid test case
var forkchoiceReqs []verification.Requirement
if step.Valid != nil && !*step.Valid {
forkchoiceReqs = verification.SpectestDataColumnSidecarRequirements
} else {
forkchoiceReqs = []verification.Requirement{
verification.RequireNotFromFutureSlot,
verification.RequireSlotAboveFinalized,
verification.RequireValidProposerSignature,
verification.RequireSidecarParentSlotLower,
verification.RequireSidecarDescendsFromFinalized,
verification.RequireSidecarInclusionProven,
verification.RequireSidecarProposerExpected,
}
}
dv := ini.NewDataColumnsVerifier(allColumns, forkchoiceReqs)
ctx := t.Context()
if step.Valid != nil && !*step.Valid {
if err := dv.ValidFields(); err != nil {
t.Logf("ValidFields error: %s", err.Error())
}
}
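// Each requirement is exercised individually and failures are only logged here;
// the spec assertion happens once, on VerifiedRODataColumns below.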
if err := dv.NotFromFutureSlot(); err != nil {
t.Logf("NotFromFutureSlot error: %s", err.Error())
}
if err := dv.SlotAboveFinalized(); err != nil {
t.Logf("SlotAboveFinalized error: %s", err.Error())
}
if err := dv.SidecarInclusionProven(); err != nil {
t.Logf("SidecarInclusionProven error: %s", err.Error())
}
if err := dv.ValidProposerSignature(ctx); err != nil {
t.Logf("ValidProposerSignature error: %s", err.Error())
}
if err := dv.SidecarParentSlotLower(); err != nil {
t.Logf("SidecarParentSlotLower error: %s", err.Error())
}
if err := dv.SidecarDescendsFromFinalized(); err != nil {
t.Logf("SidecarDescendsFromFinalized error: %s", err.Error())
}
if err := dv.SidecarProposerExpected(ctx); err != nil {
t.Logf("SidecarProposerExpected error: %s", err.Error())
}
vdc, err := dv.VerifiedRODataColumns()
requireVerifyExpected(t, err)
if err == nil {
for _, column := range vdc {
require.NoError(t, builder.service.ReceiveDataColumn(column))
}
}
}
}
func errAssertionForStep(step Step, expect error) func(t *testing.T, err error) {
if step.Valid != nil && !*step.Valid {
return func(t *testing.T, err error) {
require.NotNil(t, err)
if expect.Error() == "data columns failed verification" {
// The verifier wraps its own sentinel, so match by message rather than identity.
require.Equal(t, true, strings.Contains(err.Error(), expect.Error()))
} else {
require.ErrorIs(t, err, expect)
}
}
}
return func(t *testing.T, err error) {

View File

@@ -11,6 +11,7 @@ type Step struct {
PayloadStatus *MockEngineResp `json:"payload_status"`
PowBlock *string `json:"pow_block"`
Check *Check `json:"checks"`
DataColumns []*string `json:"columns"`
}
type Check struct {

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/apiutil"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -74,6 +75,7 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp in
if err != nil {
return errors.Wrapf(err, "failed to create request for endpoint %s", url)
}
req.Header.Set("User-Agent", version.BuildData())
httpResp, err := c.client.Do(req)
if err != nil {
return errors.Wrapf(err, "failed to perform request for endpoint %s", url)
@@ -103,6 +105,7 @@ func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]b
o(req)
}
req.Header.Set("User-Agent", version.BuildData())
httpResp, err := c.client.Do(req)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to perform request for endpoint %s", url)
@@ -162,7 +165,7 @@ func (c *BeaconApiRestHandler) Post(
req.Header.Set(headerKey, headerValue)
}
req.Header.Set("Content-Type", api.JsonMediaType)
req.Header.Set("User-Agent", version.BuildData())
httpResp, err := c.client.Do(req)
if err != nil {
return errors.Wrapf(err, "failed to perform request for endpoint %s", url)
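
Setting the header at each call site works, but every new endpoint has to remember it. An alternative sketch (not what this change does) is a wrapping http.RoundTripper that stamps all outbound requests once:

package clienthelpers

import "net/http"

// userAgentTransport stamps a fixed User-Agent header on every outbound request.
type userAgentTransport struct {
	agent string
	base  http.RoundTripper
}

func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone so callers that retry the original request are unaffected.
	r := req.Clone(req.Context())
	r.Header.Set("User-Agent", t.agent)
	return t.base.RoundTrip(r)
}

// NewClient returns an *http.Client whose requests all carry the given agent string.
func NewClient(agent string) *http.Client {
	return &http.Client{Transport: &userAgentTransport{agent: agent, base: http.DefaultTransport}}
}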

View File

@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/pkg/errors"
@@ -36,7 +37,7 @@ func TestGet(t *testing.T) {
mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
marshalledJson, err := json.Marshal(genesisJson)
require.NoError(t, err)
assert.Equal(t, version.BuildData(), r.Header.Get("User-Agent"))
w.Header().Set("Content-Type", api.JsonMediaType)
_, err = w.Write(marshalledJson)
require.NoError(t, err)
@@ -70,6 +71,7 @@ func TestGetSSZ(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
assert.StringContains(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
assert.Equal(t, version.BuildData(), r.Header.Get("User-Agent"))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
_, err := w.Write(expectedBody)
require.NoError(t, err)
@@ -182,6 +184,7 @@ func TestPost(t *testing.T) {
mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
// Make sure the request headers have been set
assert.Equal(t, "bar", r.Header.Get("foo"))
assert.Equal(t, version.BuildData(), r.Header.Get("User-Agent"))
assert.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
// Make sure the data matches