Compare commits


7 Commits

Author SHA1 Message Date
Manu NALEPA
039521d793 NewGenesisBlockForState: Add Fulu. 2025-09-22 14:26:19 +02:00
Muzry
02dca85251 fix getStateRandao not returning historic RANDAO mix values (#15653)
* fix getStateRandao not returning historic RANDAO mix values

* update: the lower bound statement

* Update changelog/muzry_fix_state_randao.md

* Update changelog/muzry_fix_state_randao.md

---------

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>
2025-09-21 23:35:03 +00:00
Manu NALEPA
39b2163702 Fix orphaned blocks (#15720)
* `Broadcasted data column sidecar` log: Add `blobCount`.

* `broadcastAndReceiveDataColumns`: Broadcast and receive data columns in parallel.

* `ProposeBeaconBlock`: First broadcast/receive block, and then sidecars.

* `broadcastReceiveBlock`: Add log.

* Add changelog

* Fix deadlock-option 1.

* Fix deadlock-option 2.

* Take notifier out of the critical section

* only compute common info once, for all sidecars

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-09-19 15:45:44 +00:00
james-prysm
d67ee62efa Debug data columns API endpoint (#15701)
* initial

* fixing from self review

* changelog

* fixing endpoints and adding test

* removed unneeded test

* self review

* fixing mock columns

* fixing tests

* gofmt

* fixing endpoint

* gaz

* gofmt

* fixing tests

* gofmt

* gaz

* radek comments

* gaz

* fixing formatting

* deduplicating and fixing an old bug, will break into separate PR

* better way for version

* optimizing post merge and fixing tests

* Update beacon-chain/rpc/eth/debug/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/eth/debug/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* adding some of radek's feedback

* reverting and gaz

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-09-19 15:20:43 +00:00
Jun Song
9f9401e615 SSZ-QL: Handle Bitlist and Bitvector (#15704)
* Add bitvector field for FixedTestContainer

* Handle Bitvector type using isBitfield flag

* Add Bitvector case for Stringify

* Add bitlist field for VariableTestContainer

* Add bitlistInfo

* Changelog

* Add bitvectorInfo

* Remove analyzeBit* functions and just inline them

* Fix misleading comments

* Add comments for bitlistInfo's Size

* Apply reviews from Radek
2025-09-19 13:58:56 +00:00
Muzry
6b89d839f6 Fix prysmctl panic when baseFee is not set in genesis.json (#15687)
* Fix prysmctl panic when baseFee is not set in genesis.json

* update add unit test

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-09-18 21:48:28 +00:00
james-prysm
d826a3c7fe adding justified support to blocker and optimizing logic (#15715)
* adding justified support to blocker and optimizing logic

* gaz

* fixing tests

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* better error handling from radek feedback

* unneeded change

* more radek review

* addressing more feedback

* fixing small comment

* fixing errors

* removing fallback

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-09-18 17:01:27 +00:00
44 changed files with 2081 additions and 344 deletions

View File

@@ -56,3 +56,19 @@ type ForkChoiceNodeExtraData struct {
TimeStamp string `json:"timestamp"`
Target string `json:"target"`
}
type GetDebugDataColumnSidecarsResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*DataColumnSidecar `json:"data"`
}
type DataColumnSidecar struct {
Index string `json:"index"`
Column []string `json:"column"`
KzgCommitments []string `json:"kzg_commitments"`
KzgProofs []string `json:"kzg_proofs"`
SignedBeaconBlockHeader *SignedBeaconBlockHeader `json:"signed_block_header"`
KzgCommitmentsInclusionProof []string `json:"kzg_commitments_inclusion_proof"`
}
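The two structs above define the JSON payload served by the new debug endpoint. A minimal sketch of what a marshalled response looks like (the version string and hex strings are placeholders, and the signed block header is left nil for brevity):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/api/server/structs"
)

func main() {
	resp := &structs.GetDebugDataColumnSidecarsResponse{
		Version:             "fulu", // placeholder; the handler derives it from the block's fork
		ExecutionOptimistic: false,
		Finalized:           true,
		Data: []*structs.DataColumnSidecar{{
			Index:                        "0",
			Column:                       []string{"0xaa..."}, // one hex-encoded cell per blob
			KzgCommitments:               []string{"0xbb..."},
			KzgProofs:                    []string{"0xcc..."},
			SignedBeaconBlockHeader:      nil, // omitted in this sketch
			KzgCommitmentsInclusionProof: []string{"0xdd..."},
		}},
	}
	out, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}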

View File

@@ -184,45 +184,54 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
})
case *ethpb.BeaconStateElectra:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
},
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateFulu:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockFulu{
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
}
func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
return &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
}
}

View File

@@ -109,15 +109,14 @@ func DataColumnSidecars(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
maxIdx := params.BeaconConfig().NumberOfColumns
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
for idx := range maxIdx {
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],

View File

@@ -381,6 +381,7 @@ func (s *Service) internalBroadcastDataColumnSidecar(
"timeSinceSlotStart": time.Since(slotStartTime),
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
"columnSubnet": columnSubnet,
"blobCount": len(dataColumnSidecar.Column),
}).Debug("Broadcasted data column sidecar")
// Increase the number of successful broadcasts.
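The hunk above only adds the blobCount field to the broadcast log; the rest of the "Fix orphaned blocks" commit, per its bullet points, broadcasts and receives the data column sidecars in parallel after the block itself has been broadcast and received. A rough sketch of that fan-out shape, assuming an errgroup and hypothetical broadcast/receive callbacks (not Prysm's actual APIs; the real change lives in broadcastAndReceiveDataColumns and ProposeBeaconBlock, which are not shown in this compare view):

package sketch

import (
	"context"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"golang.org/x/sync/errgroup"
)

// broadcastAndReceiveColumns is a hypothetical illustration of per-sidecar fan-out.
func broadcastAndReceiveColumns(
	ctx context.Context,
	sidecars []*ethpb.DataColumnSidecar,
	broadcast, receive func(context.Context, *ethpb.DataColumnSidecar) error,
) error {
	g, gctx := errgroup.WithContext(ctx)
	for _, sc := range sidecars {
		sc := sc // capture for the goroutine (needed on Go < 1.22)
		g.Go(func() error {
			if err := broadcast(gctx, sc); err != nil {
				return err
			}
			return receive(gctx, sc)
		})
	}
	return g.Wait() // first error cancels gctx; Wait returns once all goroutines exit
}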

View File

@@ -106,7 +106,7 @@ func (s *Service) endpoints(
}
if enableDebug {
endpoints = append(endpoints, s.debugEndpoints(stater)...)
endpoints = append(endpoints, s.debugEndpoints(stater, blocker)...)
}
return endpoints
@@ -1097,7 +1097,7 @@ func (s *Service) lightClientEndpoints() []endpoint {
}
}
func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
func (s *Service) debugEndpoints(stater lookup.Stater, blocker lookup.Blocker) []endpoint {
server := &debug.Server{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
@@ -1107,6 +1107,8 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
Blocker: blocker,
}
const namespace = "debug"
@@ -1141,6 +1143,16 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
handler: server.GetForkChoice,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/debug/beacon/data_column_sidecars/{block_id}",
name: namespace + ".GetDataColumnSidecars",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.DataColumnSidecars,
methods: []string{http.MethodGet},
},
}
}
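With the route registered above, the endpoint can be exercised over the node's HTTP API; per the Accept-header middleware it serves both JSON and SSZ. A minimal client sketch (the host/port is an assumption; the repeated indices parameters match the query handling in parseDataColumnIndices shown later in this compare):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask for columns 0 and 1 of the head block; omit "indices" to get every custodied column.
	url := "http://localhost:3500/eth/v1/debug/beacon/data_column_sidecars/head?indices=0&indices=1"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/json") // or "application/octet-stream" for SSZ
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}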

View File

@@ -80,9 +80,10 @@ func Test_endpoints(t *testing.T) {
}
debugRoutes := map[string][]string{
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v1/debug/fork_choice": {http.MethodGet},
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v1/debug/fork_choice": {http.MethodGet},
"/eth/v1/debug/beacon/data_column_sidecars/{block_id}": {http.MethodGet},
}
eventsRoutes := map[string][]string{

View File

@@ -109,10 +109,10 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
// future epochs and epochs too far back are not supported.
randaoEpochLowerBound := uint64(0)
// Lower bound should not underflow.
if uint64(stEpoch) > uint64(st.RandaoMixesLength()) {
randaoEpochLowerBound = uint64(stEpoch) - uint64(st.RandaoMixesLength())
if uint64(stEpoch) >= uint64(st.RandaoMixesLength()) {
randaoEpochLowerBound = uint64(stEpoch) - uint64(st.RandaoMixesLength()) + 1
}
if epoch > stEpoch || uint64(epoch) < randaoEpochLowerBound+1 {
if epoch > stEpoch || (uint64(epoch) < randaoEpochLowerBound) {
httputil.HandleError(w, "Epoch is out of range for the randao mixes of the state", http.StatusBadRequest)
return
}
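The corrected bound makes the oldest retrievable epoch max(0, stEpoch - RandaoMixesLength() + 1): the state keeps RandaoMixesLength() (EPOCHS_PER_HISTORICAL_VECTOR) historical mixes, and the previous > / lowerBound+1 combination rejected epoch 0 queries even while the epoch-0 mix was still inside the vector. A worked sketch of the arithmetic using mainnet's vector length of 65536 (the same boundaries the new tests below exercise):

package main

import "fmt"

// oldestRetrievableEpoch mirrors the corrected lower-bound logic above.
func oldestRetrievableEpoch(stEpoch, vectorLen uint64) uint64 {
	if stEpoch >= vectorLen {
		return stEpoch - vectorLen + 1
	}
	return 0
}

func main() {
	const vectorLen = 65536 // mainnet EPOCHS_PER_HISTORICAL_VECTOR
	fmt.Println(oldestRetrievableEpoch(65535, vectorLen))  // 0: epoch 0 still retrievable
	fmt.Println(oldestRetrievableEpoch(65536, vectorLen))  // 1: epoch 0 has just fallen out of the vector
	fmt.Println(oldestRetrievableEpoch(100000, vectorLen)) // 34465
}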

View File

@@ -192,8 +192,14 @@ func TestGetRandao(t *testing.T) {
assert.Equal(t, hexutil.Encode(mixOld[:]), resp.Data.Randao)
})
t.Run("head state below `EpochsPerHistoricalVector`", func(t *testing.T) {
s.Stater = &testutil.MockStater{
BeaconState: headSt,
s := &Server{
Stater: &testutil.MockStater{
BeaconState: headSt,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
BeaconDB: db,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao", nil)
@@ -303,6 +309,74 @@ func TestGetRandao(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.DeepEqual(t, true, resp.Finalized)
})
t.Run("early epoch scenario - epoch 0 from state at epoch (EpochsPerHistoricalVector - 1)", func(t *testing.T) {
// Create a state at early epoch
earlyEpochState, err := util.NewBeaconState()
require.NoError(t, err)
earlyEpoch := params.BeaconConfig().EpochsPerHistoricalVector - 1
require.NoError(t, earlyEpochState.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(earlyEpoch)))
// Set up RANDAO mix for epoch 0
// In real networks, this would be the ETH1 block hash used for genesis
epoch0Randao := bytesutil.ToBytes32([]byte("epoch0"))
require.NoError(t, earlyEpochState.UpdateRandaoMixesAtIndex(0, epoch0Randao))
earlyServer := &Server{
Stater: &testutil.MockStater{
BeaconState: earlyEpochState,
},
HeadFetcher: &chainMock.ChainService{},
OptimisticModeFetcher: &chainMock.ChainService{},
FinalizationFetcher: &chainMock.ChainService{},
}
// Query epoch 0 from state at epoch (EpochsPerHistoricalVector - 1) - should succeed
request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=0", nil)
request.SetPathValue("state_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
earlyServer.GetRandao(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Early epoch queries should succeed when within bounds")
resp := &structs.GetRandaoResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(epoch0Randao[:]), resp.Data.Randao)
})
t.Run("early epoch scenario - epoch 0 from state at epoch EpochsPerHistoricalVector", func(t *testing.T) {
// Create a state at early epoch
earlyEpochState, err := util.NewBeaconState()
require.NoError(t, err)
earlyEpoch := params.BeaconConfig().EpochsPerHistoricalVector
require.NoError(t, earlyEpochState.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(earlyEpoch)))
// Set up RANDAO mix for epoch 0
// In real networks, this would be the ETH1 block hash used for genesis
epoch0Randao := bytesutil.ToBytes32([]byte("epoch0"))
require.NoError(t, earlyEpochState.UpdateRandaoMixesAtIndex(0, epoch0Randao))
earlyServer := &Server{
Stater: &testutil.MockStater{
BeaconState: earlyEpochState,
},
HeadFetcher: &chainMock.ChainService{},
OptimisticModeFetcher: &chainMock.ChainService{},
FinalizationFetcher: &chainMock.ChainService{},
}
// Query epoch 0 from state at epoch EpochsPerHistoricalVector - should fail
request := httptest.NewRequest(http.MethodGet, "http://example.com//eth/v1/beacon/states/{state_id}/randao?epoch=0", nil)
request.SetPathValue("state_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
earlyServer.GetRandao(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
require.StringContains(t, "Epoch is out of range for the randao mixes of the state", e.Message)
})
}
func Test_currentCommitteeIndicesFromState(t *testing.T) {

View File

@@ -71,7 +71,7 @@ func TestBlobs(t *testing.T) {
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "blobs are not supported for Phase 0 fork", e.Message)
assert.StringContains(t, "not supported for Phase 0 fork", e.Message)
})
t.Run("head", func(t *testing.T) {
u := "http://foo.example/head"
@@ -336,11 +336,23 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("slot before Deneb fork", func(t *testing.T) {
// Create and save a pre-Deneb block at slot 31
predenebBlock := util.NewBeaconBlock()
predenebBlock.Block.Slot = 31
util.SaveBlock(t, t.Context(), db, predenebBlock)
u := "http://foo.example/31"
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.Blocker = &lookup.BeaconDbBlocker{}
s.Blocker = &lookup.BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
}
s.Blobs(writer, request)
@@ -348,7 +360,7 @@ func TestBlobs(t *testing.T) {
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "blobs are not supported before Deneb fork", e.Message)
assert.StringContains(t, "not supported before", e.Message)
})
t.Run("malformed block ID", func(t *testing.T) {
u := "http://foo.example/foo"
@@ -609,7 +621,7 @@ func TestGetBlobs(t *testing.T) {
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "blobs are not supported for Phase 0 fork", e.Message)
assert.StringContains(t, "not supported for Phase 0 fork", e.Message)
})
t.Run("head", func(t *testing.T) {
u := "http://foo.example/head"
@@ -808,11 +820,22 @@ func TestGetBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("slot before Deneb fork", func(t *testing.T) {
// Create and save a pre-Deneb block at slot 31
predenebBlock := util.NewBeaconBlock()
predenebBlock.Block.Slot = 31
util.SaveBlock(t, t.Context(), db, predenebBlock)
u := "http://foo.example/31"
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.Blocker = &lookup.BeaconDbBlocker{}
s.Blocker = &lookup.BeaconDbBlocker{
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
}
s.GetBlobs(writer, request)
@@ -820,7 +843,7 @@ func TestGetBlobs(t *testing.T) {
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "blobs are not supported before Deneb fork", e.Message)
assert.StringContains(t, "not supported before", e.Message)
})
t.Run("malformed block ID", func(t *testing.T) {
u := "http://foo.example/foo"

View File

@@ -13,13 +13,19 @@ go_library(
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/httputil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -34,9 +40,13 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -4,19 +4,31 @@ import (
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/httputil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
)
const errMsgStateFromConsensus = "Could not convert consensus state to response"
const (
errMsgStateFromConsensus = "Could not convert consensus state to response"
)
// GetBeaconStateV2 returns the full beacon state for a given state ID.
func (s *Server) GetBeaconStateV2(w http.ResponseWriter, r *http.Request) {
@@ -208,3 +220,181 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
}
httputil.WriteJson(w, resp)
}
// DataColumnSidecars retrieves data column sidecars for a given block id.
func (s *Server) DataColumnSidecars(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "debug.DataColumnSidecars")
defer span.End()
// Check if we're before Fulu fork - data columns are only available from Fulu onwards
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
if fuluForkEpoch == math.MaxUint64 {
httputil.HandleError(w, "Data columns are not supported - Fulu fork not configured", http.StatusBadRequest)
return
}
// Check if we're before Fulu fork based on current slot
currentSlot := s.GenesisTimeFetcher.CurrentSlot()
currentEpoch := primitives.Epoch(currentSlot / params.BeaconConfig().SlotsPerEpoch)
if currentEpoch < fuluForkEpoch {
httputil.HandleError(w, "Data columns are not supported - before Fulu fork", http.StatusBadRequest)
return
}
indices, err := parseDataColumnIndices(r.URL)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
segments := strings.Split(r.URL.Path, "/")
blockId := segments[len(segments)-1]
verifiedDataColumns, rpcErr := s.Blocker.DataColumns(ctx, blockId, indices)
if rpcErr != nil {
code := core.ErrorReasonToHTTP(rpcErr.Reason)
switch code {
case http.StatusBadRequest:
httputil.HandleError(w, "Bad request: "+rpcErr.Err.Error(), code)
return
case http.StatusNotFound:
httputil.HandleError(w, "Not found: "+rpcErr.Err.Error(), code)
return
case http.StatusInternalServerError:
httputil.HandleError(w, "Internal server error: "+rpcErr.Err.Error(), code)
return
default:
httputil.HandleError(w, rpcErr.Err.Error(), code)
return
}
}
blk, err := s.Blocker.Block(ctx, []byte(blockId))
if err != nil {
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
return
}
if blk == nil {
httputil.HandleError(w, "Block not found", http.StatusNotFound)
return
}
if httputil.RespondWithSsz(r) {
sszResp, err := buildDataColumnSidecarsSSZResponse(verifiedDataColumns)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
httputil.WriteSsz(w, sszResp)
return
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
httputil.HandleError(w, "Could not hash block: "+err.Error(), http.StatusInternalServerError)
return
}
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
if err != nil {
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
return
}
data := buildDataColumnSidecarsJsonResponse(verifiedDataColumns)
resp := &structs.GetDebugDataColumnSidecarsResponse{
Version: version.String(blk.Version()),
Data: data,
ExecutionOptimistic: isOptimistic,
Finalized: s.FinalizationFetcher.IsFinalized(ctx, blkRoot),
}
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
httputil.WriteJson(w, resp)
}
// parseDataColumnIndices filters out invalid and duplicate data column indices
func parseDataColumnIndices(url *url.URL) ([]int, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
rawIndices := url.Query()["indices"]
indices := make([]int, 0, numberOfColumns)
invalidIndices := make([]string, 0)
loop:
for _, raw := range rawIndices {
ix, err := strconv.Atoi(raw)
if err != nil {
invalidIndices = append(invalidIndices, raw)
continue
}
if !(0 <= ix && uint64(ix) < numberOfColumns) {
invalidIndices = append(invalidIndices, raw)
continue
}
for i := range indices {
if ix == indices[i] {
continue loop
}
}
indices = append(indices, ix)
}
if len(invalidIndices) > 0 {
return nil, fmt.Errorf("requested data column indices %v are invalid", invalidIndices)
}
return indices, nil
}
func buildDataColumnSidecarsJsonResponse(verifiedDataColumns []blocks.VerifiedRODataColumn) []*structs.DataColumnSidecar {
sidecars := make([]*structs.DataColumnSidecar, len(verifiedDataColumns))
for i, dc := range verifiedDataColumns {
column := make([]string, len(dc.Column))
for j, cell := range dc.Column {
column[j] = hexutil.Encode(cell)
}
kzgCommitments := make([]string, len(dc.KzgCommitments))
for j, commitment := range dc.KzgCommitments {
kzgCommitments[j] = hexutil.Encode(commitment)
}
kzgProofs := make([]string, len(dc.KzgProofs))
for j, proof := range dc.KzgProofs {
kzgProofs[j] = hexutil.Encode(proof)
}
kzgCommitmentsInclusionProof := make([]string, len(dc.KzgCommitmentsInclusionProof))
for j, proof := range dc.KzgCommitmentsInclusionProof {
kzgCommitmentsInclusionProof[j] = hexutil.Encode(proof)
}
sidecars[i] = &structs.DataColumnSidecar{
Index: strconv.FormatUint(dc.Index, 10),
Column: column,
KzgCommitments: kzgCommitments,
KzgProofs: kzgProofs,
SignedBeaconBlockHeader: structs.SignedBeaconBlockHeaderFromConsensus(dc.SignedBlockHeader),
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
}
return sidecars
}
// buildDataColumnSidecarsSSZResponse builds SSZ response for data column sidecars
func buildDataColumnSidecarsSSZResponse(verifiedDataColumns []blocks.VerifiedRODataColumn) ([]byte, error) {
if len(verifiedDataColumns) == 0 {
return []byte{}, nil
}
// Pre-allocate buffer for all sidecars using the known SSZ size
sizePerSidecar := (&ethpb.DataColumnSidecar{}).SizeSSZ()
ssz := make([]byte, 0, sizePerSidecar*len(verifiedDataColumns))
// Marshal and append each sidecar
for i, sidecar := range verifiedDataColumns {
sszrep, err := sidecar.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal data column sidecar at index %d", i)
}
ssz = append(ssz, sszrep...)
}
return ssz, nil
}
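buildDataColumnSidecarsSSZResponse concatenates each sidecar's SSZ encoding back to back with no framing; the SizeSSZ() of an empty sidecar is only a capacity hint for the buffer. A client sketch that sidesteps splitting by requesting a single column and unmarshalling the body directly (host/port are assumptions; UnmarshalSSZ is the fastssz counterpart of the MarshalSSZ call above):

package main

import (
	"fmt"
	"io"
	"net/http"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func main() {
	// Request exactly one column so the octet-stream body is a single DataColumnSidecar encoding.
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:3500/eth/v1/debug/beacon/data_column_sidecars/head?indices=0", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/octet-stream")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	sidecar := &ethpb.DataColumnSidecar{}
	if err := sidecar.UnmarshalSSZ(body); err != nil {
		panic(err)
	}
	fmt.Printf("column %d: %d cells, %d commitments\n",
		sidecar.Index, len(sidecar.Column), len(sidecar.KzgCommitments))
}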

View File

@@ -2,9 +2,13 @@ package debug
import (
"bytes"
"context"
"encoding/json"
"errors"
"math"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/OffchainLabs/prysm/v6/api"
@@ -13,9 +17,13 @@ import (
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -515,3 +523,285 @@ func TestGetForkChoice(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, "2", resp.FinalizedCheckpoint.Epoch)
}
func TestDataColumnSidecars(t *testing.T) {
t.Run("Fulu fork not configured", func(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()
// Set Fulu fork epoch to MaxUint64 (unconfigured)
config := params.BeaconConfig().Copy()
config.FuluForkEpoch = math.MaxUint64
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{}
// Create a mock blocker to avoid nil pointer
mockBlocker := &testutil.MockBlocker{}
s := &Server{
GenesisTimeFetcher: chainService,
Blocker: mockBlocker,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.DataColumnSidecars(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "Data columns are not supported - Fulu fork not configured", writer.Body.String())
})
t.Run("Before Fulu fork", func(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()
// Set Fulu fork epoch to 100
config := params.BeaconConfig().Copy()
config.FuluForkEpoch = 100
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{}
currentSlot := primitives.Slot(0) // Current slot 0 (epoch 0, before epoch 100)
chainService.Slot = &currentSlot
// Create a mock blocker to avoid nil pointer
mockBlocker := &testutil.MockBlocker{
DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
return nil, &core.RpcError{Err: errors.New("before Fulu fork"), Reason: core.BadRequest}
},
}
s := &Server{
GenesisTimeFetcher: chainService,
Blocker: mockBlocker,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.DataColumnSidecars(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "Data columns are not supported - before Fulu fork", writer.Body.String())
})
t.Run("Invalid indices", func(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()
// Set Fulu fork epoch to 0 (already activated)
config := params.BeaconConfig().Copy()
config.FuluForkEpoch = 0
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{}
currentSlot := primitives.Slot(0) // Current slot 0 (epoch 0, at fork)
chainService.Slot = &currentSlot
// Create a mock blocker to avoid nil pointer
mockBlocker := &testutil.MockBlocker{
DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
return nil, &core.RpcError{Err: errors.New("invalid index"), Reason: core.BadRequest}
},
}
s := &Server{
GenesisTimeFetcher: chainService,
Blocker: mockBlocker,
}
// Test with invalid index (out of range)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head?indices=9999", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.DataColumnSidecars(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "requested data column indices [9999] are invalid", writer.Body.String())
})
t.Run("Block not found", func(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()
// Set Fulu fork epoch to 0 (already activated)
config := params.BeaconConfig().Copy()
config.FuluForkEpoch = 0
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{}
currentSlot := primitives.Slot(0) // Current slot 0
chainService.Slot = &currentSlot
// Create a mock blocker that returns block not found
mockBlocker := &testutil.MockBlocker{
DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
return nil, &core.RpcError{Err: errors.New("block not found"), Reason: core.NotFound}
},
BlockToReturn: nil, // Block not found
}
s := &Server{
GenesisTimeFetcher: chainService,
Blocker: mockBlocker,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.DataColumnSidecars(writer, request)
require.Equal(t, http.StatusNotFound, writer.Code)
})
t.Run("Empty data columns", func(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()
// Set Fulu fork epoch to 0
config := params.BeaconConfig().Copy()
config.FuluForkEpoch = 0
params.OverrideBeaconConfig(config)
// Create a simple test block
signedTestBlock := util.NewBeaconBlock()
roBlock, err := blocks.NewSignedBeaconBlock(signedTestBlock)
require.NoError(t, err)
chainService := &blockchainmock.ChainService{}
currentSlot := primitives.Slot(0) // Current slot 0
chainService.Slot = &currentSlot
chainService.OptimisticRoots = make(map[[32]byte]bool)
chainService.FinalizedRoots = make(map[[32]byte]bool)
mockBlocker := &testutil.MockBlocker{
DataColumnsFunc: func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
return []blocks.VerifiedRODataColumn{}, nil // Empty data columns
},
BlockToReturn: roBlock,
}
s := &Server{
GenesisTimeFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
Blocker: mockBlocker,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/beacon/data_column_sidecars/head", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.DataColumnSidecars(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetDebugDataColumnSidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 0, len(resp.Data))
})
}
func TestParseDataColumnIndices(t *testing.T) {
// Save the original config
originalConfig := params.BeaconConfig()
defer func() { params.OverrideBeaconConfig(originalConfig) }()
// Set NumberOfColumns to 128 for testing
config := params.BeaconConfig().Copy()
config.NumberOfColumns = 128
params.OverrideBeaconConfig(config)
tests := []struct {
name string
queryParams map[string][]string
expected []int
expectError bool
}{
{
name: "no indices",
queryParams: map[string][]string{},
expected: []int{},
expectError: false,
},
{
name: "valid indices",
queryParams: map[string][]string{"indices": {"0", "1", "127"}},
expected: []int{0, 1, 127},
expectError: false,
},
{
name: "duplicate indices",
queryParams: map[string][]string{"indices": {"0", "1", "0"}},
expected: []int{0, 1},
expectError: false,
},
{
name: "invalid string index",
queryParams: map[string][]string{"indices": {"abc"}},
expected: nil,
expectError: true,
},
{
name: "negative index",
queryParams: map[string][]string{"indices": {"-1"}},
expected: nil,
expectError: true,
},
{
name: "index too large",
queryParams: map[string][]string{"indices": {"128"}}, // 128 >= NumberOfColumns (128)
expected: nil,
expectError: true,
},
{
name: "mixed valid and invalid",
queryParams: map[string][]string{"indices": {"0", "abc", "1"}},
expected: nil,
expectError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
u, err := url.Parse("http://example.com/test")
require.NoError(t, err)
q := u.Query()
for key, values := range tt.queryParams {
for _, value := range values {
q.Add(key, value)
}
}
u.RawQuery = q.Encode()
result, err := parseDataColumnIndices(u)
if tt.expectError {
assert.NotNil(t, err)
} else {
require.NoError(t, err)
assert.DeepEqual(t, tt.expected, result)
}
})
}
}
func TestBuildDataColumnSidecarsSSZResponse(t *testing.T) {
t.Run("empty data columns", func(t *testing.T) {
result, err := buildDataColumnSidecarsSSZResponse([]blocks.VerifiedRODataColumn{})
require.NoError(t, err)
require.DeepEqual(t, []byte{}, result)
})
t.Run("get SSZ size", func(t *testing.T) {
size := (&ethpb.DataColumnSidecar{}).SizeSSZ()
assert.Equal(t, true, size > 0)
})
}

View File

@@ -20,4 +20,6 @@ type Server struct {
ForkchoiceFetcher blockchain.ForkchoiceFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ChainInfoFetcher blockchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
Blocker lookup.Blocker
}

View File

@@ -39,8 +39,8 @@ func HandleIsOptimisticError(w http.ResponseWriter, err error) {
return
}
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
if errors.As(err, &blockRootsNotFoundErr) {
var blockNotFoundErr *lookup.BlockNotFoundError
if errors.As(err, &blockNotFoundErr) {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
return
}

View File

@@ -138,7 +138,7 @@ func isStateRootOptimistic(
return true, errors.Wrapf(err, "could not get block roots for slot %d", st.Slot())
}
if !has {
return true, lookup.NewBlockRootsNotFoundError()
return true, lookup.NewBlockNotFoundError("no block roots returned from the database")
}
for _, r := range roots {
b, err := database.Block(ctx, r)

View File

@@ -286,8 +286,8 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
mf := &testutil.MockStater{BeaconState: st}
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
require.Equal(t, true, errors.As(err, &blockRootsNotFoundErr))
var blockNotFoundErr *lookup.BlockNotFoundError
require.Equal(t, true, errors.As(err, &blockNotFoundErr))
})
})
@@ -395,11 +395,11 @@ func TestHandleIsOptimisticError(t *testing.T) {
})
t.Run("no block roots error handled as 404", func(t *testing.T) {
rr := httptest.NewRecorder()
blockRootsErr := lookup.NewBlockRootsNotFoundError()
HandleIsOptimisticError(rr, blockRootsErr)
blockNotFoundErr := lookup.NewBlockNotFoundError("no block roots returned from the database")
HandleIsOptimisticError(rr, blockNotFoundErr)
require.Equal(t, http.StatusNotFound, rr.Code)
require.StringContains(t, blockRootsErr.Error(), rr.Body.String())
require.StringContains(t, blockNotFoundErr.Error(), rr.Body.String())
})
t.Run("generic error handled as 500", func(t *testing.T) {

View File

@@ -24,17 +24,19 @@ import (
"github.com/pkg/errors"
)
type BlockRootsNotFoundError struct {
// BlockNotFoundError represents an error when a block cannot be found.
type BlockNotFoundError struct {
message string
}
func NewBlockRootsNotFoundError() *BlockRootsNotFoundError {
return &BlockRootsNotFoundError{
message: "no block roots returned from the database",
// NewBlockNotFoundError creates a new BlockNotFoundError with a custom message.
func NewBlockNotFoundError(msg string) *BlockNotFoundError {
return &BlockNotFoundError{
message: msg,
}
}
func (e BlockRootsNotFoundError) Error() string {
func (e *BlockNotFoundError) Error() string {
return e.message
}
@@ -59,6 +61,7 @@ func (e BlockIdParseError) Error() string {
type Blocker interface {
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
}
// BeaconDbBlocker is an implementation of Blocker. It retrieves blocks from the beacon chain database.
@@ -70,6 +73,141 @@ type BeaconDbBlocker struct {
DataColumnStorage *filesystem.DataColumnStorage
}
// resolveBlockIDByRootOrSlot resolves a block ID that is either a root (hex string or raw bytes) or a slot number.
func (p *BeaconDbBlocker) resolveBlockIDByRootOrSlot(ctx context.Context, id string) ([fieldparams.RootLength]byte, interfaces.ReadOnlySignedBeaconBlock, error) {
var rootSlice []byte
// Check if it's a hex-encoded root
if bytesutil.IsHex([]byte(id)) {
var err error
rootSlice, err = bytesutil.DecodeHexWithLength(id, fieldparams.RootLength)
if err != nil {
e := NewBlockIdParseError(err)
return [32]byte{}, nil, &e
}
} else if len(id) == 32 {
// Handle raw 32-byte root
rootSlice = []byte(id)
} else {
// Try to parse as slot number
slot, err := strconv.ParseUint(id, 10, 64)
if err != nil {
e := NewBlockIdParseError(err)
return [32]byte{}, nil, &e
}
// Get block roots for the slot
ok, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return [32]byte{}, nil, errors.Wrapf(err, "could not retrieve block roots for slot %d", slot)
}
if !ok || len(roots) == 0 {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("no blocks found at slot %d", slot))
}
// Find the canonical block root
if p.ChainInfoFetcher == nil {
return [32]byte{}, nil, errors.New("chain info fetcher is not configured")
}
for _, root := range roots {
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, root)
if err != nil {
return [32]byte{}, nil, errors.Wrapf(err, "could not determine if block root is canonical")
}
if canonical {
rootSlice = root[:]
break
}
}
// If no canonical block found, rootSlice remains nil
if rootSlice == nil {
// No canonical block at this slot
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("no canonical block found at slot %d", slot))
}
}
// Fetch the block using the root
root := bytesutil.ToBytes32(rootSlice)
blk, err := p.BeaconDB.Block(ctx, root)
if err != nil {
return [32]byte{}, nil, errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice)
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("block %#x not found in db", rootSlice))
}
return root, blk, nil
}
// resolveBlockID resolves a block ID to root and signed block.
// Fork validation is handled outside this function by the calling methods.
func (p *BeaconDbBlocker) resolveBlockID(ctx context.Context, id string) ([fieldparams.RootLength]byte, interfaces.ReadOnlySignedBeaconBlock, error) {
switch id {
case "genesis":
blk, err := p.BeaconDB.GenesisBlock(ctx)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve genesis block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError("genesis block not found")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not get genesis block root")
}
return root, blk, nil
case "head":
blk, err := p.ChainInfoFetcher.HeadBlock(ctx)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve head block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError("head block not found")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not get head block root")
}
return root, blk, nil
case "finalized":
finalized := p.ChainInfoFetcher.FinalizedCheckpt()
if finalized == nil {
return [32]byte{}, nil, errors.New("received nil finalized checkpoint")
}
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
blk, err := p.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve finalized block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("finalized block %#x not found", finalizedRoot))
}
return finalizedRoot, blk, nil
case "justified":
jcp := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
if jcp == nil {
return [32]byte{}, nil, errors.New("received nil justified checkpoint")
}
justifiedRoot := bytesutil.ToBytes32(jcp.Root)
blk, err := p.BeaconDB.Block(ctx, justifiedRoot)
if err != nil {
return [32]byte{}, nil, errors.Wrap(err, "could not retrieve justified block")
}
if blk == nil {
return [32]byte{}, nil, NewBlockNotFoundError(fmt.Sprintf("justified block %#x not found", justifiedRoot))
}
return justifiedRoot, blk, nil
default:
return p.resolveBlockIDByRootOrSlot(ctx, id)
}
}
// Block returns the beacon block for a given identifier. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
@@ -79,71 +217,9 @@ type BeaconDbBlocker struct {
// - <hex encoded block root with '0x' prefix>
// - <block root>
func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
var err error
var blk interfaces.ReadOnlySignedBeaconBlock
switch string(id) {
case "head":
blk, err = p.ChainInfoFetcher.HeadBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve head block")
}
case "finalized":
finalized := p.ChainInfoFetcher.FinalizedCheckpt()
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
blk, err = p.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return nil, errors.New("could not get finalized block from db")
}
case "genesis":
blk, err = p.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve genesis block")
}
default:
if bytesutil.IsHex(id) {
decoded, err := hexutil.Decode(string(id))
if err != nil {
e := NewBlockIdParseError(err)
return nil, &e
}
blk, err = p.BeaconDB.Block(ctx, bytesutil.ToBytes32(decoded))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block")
}
} else if len(id) == 32 {
blk, err = p.BeaconDB.Block(ctx, bytesutil.ToBytes32(id))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block")
}
} else {
slot, err := strconv.ParseUint(string(id), 10, 64)
if err != nil {
e := NewBlockIdParseError(err)
return nil, &e
}
blks, err := p.BeaconDB.BlocksBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve blocks for slot %d", slot)
}
_, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve block roots for slot %d", slot)
}
numBlks := len(blks)
if numBlks == 0 {
return nil, nil
}
for i, b := range blks {
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, roots[i])
if err != nil {
return nil, errors.Wrapf(err, "could not determine if block root is canonical")
}
if canonical {
blk = b
break
}
}
}
_, blk, err := p.resolveBlockID(ctx, string(id))
if err != nil {
return nil, err
}
return blk, nil
}
@@ -171,88 +247,36 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.
opt(cfg)
}
// Resolve block ID to root
var rootSlice []byte
switch id {
case "genesis":
return nil, &core.RpcError{Err: errors.New("blobs are not supported for Phase 0 fork"), Reason: core.BadRequest}
case "head":
var err error
rootSlice, err = p.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not retrieve head root"), Reason: core.Internal}
}
case "finalized":
fcp := p.ChainInfoFetcher.FinalizedCheckpt()
if fcp == nil {
return nil, &core.RpcError{Err: errors.New("received nil finalized checkpoint"), Reason: core.Internal}
}
rootSlice = fcp.Root
case "justified":
jcp := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
if jcp == nil {
return nil, &core.RpcError{Err: errors.New("received nil justified checkpoint"), Reason: core.Internal}
}
rootSlice = jcp.Root
default:
if bytesutil.IsHex([]byte(id)) {
var err error
rootSlice, err = bytesutil.DecodeHexWithLength(id, fieldparams.RootLength)
if err != nil {
return nil, &core.RpcError{Err: NewBlockIdParseError(err), Reason: core.BadRequest}
}
} else {
slot, err := strconv.ParseUint(id, 10, 64)
if err != nil {
return nil, &core.RpcError{Err: NewBlockIdParseError(err), Reason: core.BadRequest}
}
denebStart, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Deneb start slot"), Reason: core.Internal}
}
if primitives.Slot(slot) < denebStart {
return nil, &core.RpcError{Err: errors.New("blobs are not supported before Deneb fork"), Reason: core.BadRequest}
}
ok, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if !ok {
return nil, &core.RpcError{Err: fmt.Errorf("no block roots at slot %d", slot), Reason: core.NotFound}
}
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to get block roots for slot %d", slot), Reason: core.Internal}
}
rootSlice = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not determine if block %#x is canonical", blockRoot), Reason: core.Internal}
}
if canonical {
rootSlice = blockRoot[:]
break
}
}
}
// Check for genesis block first (not supported for blobs)
if id == "genesis" {
return nil, &core.RpcError{Err: errors.New("not supported for Phase 0 fork"), Reason: core.BadRequest}
}
root := bytesutil.ToBytes32(rootSlice)
roSignedBlock, err := p.BeaconDB.Block(ctx, root)
// Resolve block ID to root and block
root, roSignedBlock, err := p.resolveBlockID(ctx, id)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice), Reason: core.Internal}
var blockNotFound *BlockNotFoundError
var blockIdParseErr *BlockIdParseError
reason := core.Internal // Default to Internal for unexpected errors
if errors.As(err, &blockNotFound) {
reason = core.NotFound
} else if errors.As(err, &blockIdParseErr) {
reason = core.BadRequest
}
return nil, &core.RpcError{Err: err, Reason: core.ErrorReason(reason)}
}
if roSignedBlock == nil {
return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
slot := roSignedBlock.Block().Slot()
if slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch {
return nil, &core.RpcError{Err: errors.New("blobs are not supported before Deneb fork"), Reason: core.BadRequest}
}
roBlock := roSignedBlock.Block()
commitments, err := roBlock.Body().BlobKzgCommitments()
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", root), Reason: core.Internal}
}
// If there are no commitments return 200 w/ empty list
@@ -266,7 +290,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate peerDAS start slot"), Reason: core.Internal}
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
}
@@ -461,3 +485,93 @@ func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]
return verifiedRoSidecars, nil
}
// DataColumns returns the data column sidecars for a given block id identifier and column indices. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
// - "finalized"
// - "justified"
// - <slot>
// - <hex encoded block root with '0x' prefix>
// - <block root>
//
// cases:
// - no block, 404
// - block exists, before Fulu fork, 400 (data columns are not supported before Fulu fork)
func (p *BeaconDbBlocker) DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
// Check for genesis block first (not supported for data columns)
if id == "genesis" {
return nil, &core.RpcError{Err: errors.New("data columns are not supported for Phase 0 fork"), Reason: core.BadRequest}
}
// Resolve block ID to root and block
root, roSignedBlock, err := p.resolveBlockID(ctx, id)
if err != nil {
var blockNotFound *BlockNotFoundError
var blockIdParseErr *BlockIdParseError
reason := core.Internal // Default to Internal for unexpected errors
if errors.As(err, &blockNotFound) {
reason = core.NotFound
} else if errors.As(err, &blockIdParseErr) {
reason = core.BadRequest
}
return nil, &core.RpcError{Err: err, Reason: core.ErrorReason(reason)}
}
slot := roSignedBlock.Block().Slot()
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
fuluForkSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
if slot < fuluForkSlot {
return nil, &core.RpcError{Err: errors.New("data columns are not supported before Fulu fork"), Reason: core.BadRequest}
}
roBlock := roSignedBlock.Block()
commitments, err := roBlock.Body().BlobKzgCommitments()
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", root), Reason: core.Internal}
}
// If there are no commitments return 200 w/ empty list
if len(commitments) == 0 {
return make([]blocks.VerifiedRODataColumn, 0), nil
}
// Get column indices to retrieve
columnIndices := make([]uint64, 0)
if len(indices) == 0 {
// If no indices specified, return all columns this node is custodying
summary := p.DataColumnStorage.Summary(root)
stored := summary.Stored()
for index := range stored {
columnIndices = append(columnIndices, index)
}
} else {
// Validate and convert indices
numberOfColumns := params.BeaconConfig().NumberOfColumns
for _, index := range indices {
if index < 0 || uint64(index) >= numberOfColumns {
return nil, &core.RpcError{
Err: fmt.Errorf("requested index %d is outside valid range [0, %d)", index, numberOfColumns),
Reason: core.BadRequest,
}
}
columnIndices = append(columnIndices, uint64(index))
}
}
// Retrieve data column sidecars from storage
verifiedRoDataColumns, err := p.DataColumnStorage.Get(root, columnIndices)
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrapf(err, "could not retrieve data columns for block root %#x", root),
Reason: core.Internal,
}
}
return verifiedRoDataColumns, nil
}
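For orientation, a caller-side sketch of the new Blocker.DataColumns method (the helper below is hypothetical, not part of the PR): passing nil or empty indices returns every column the node custodies for the block, and the id accepts the same forms as Block ("head", "genesis", "finalized", "justified", a slot, or a 0x-prefixed block root).

package sketch

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
)

// listCustodiedColumns is a hypothetical helper illustrating the DataColumns contract.
func listCustodiedColumns(ctx context.Context, blocker lookup.Blocker, blockID string) error {
	sidecars, rpcErr := blocker.DataColumns(ctx, blockID, nil) // nil indices => all custodied columns
	if rpcErr != nil {
		return fmt.Errorf("data columns for %q: %w", blockID, rpcErr.Err)
	}
	for _, sc := range sidecars {
		fmt.Printf("block %q: column %d with %d cells\n", blockID, sc.Index, len(sc.Column))
	}
	return nil
}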

View File

@@ -61,11 +61,12 @@ func TestGetBlock(t *testing.T) {
fetcher := &BeaconDbBlocker{
BeaconDB: beaconDB,
ChainInfoFetcher: &mockChain.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[64].BlockRoot},
CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[32].BlockRoot},
CanonicalRoots: canonicalRoots,
},
}
@@ -108,6 +109,11 @@ func TestGetBlock(t *testing.T) {
blockID: []byte("finalized"),
want: blkContainers[64].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "justified",
blockID: []byte("justified"),
want: blkContainers[32].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "genesis",
blockID: []byte("genesis"),
@@ -162,6 +168,112 @@ func TestGetBlock(t *testing.T) {
}
}
func TestBlobsErrorHandling(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
db := testDB.SetupDB(t)
t.Run("non-existent block by root returns 404", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
}
_, rpcErr := blocker.Blobs(ctx, "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
require.StringContains(t, "not found", rpcErr.Err.Error())
})
t.Run("non-existent block by slot returns 404", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{},
}
_, rpcErr := blocker.Blobs(ctx, "999999")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
require.StringContains(t, "no blocks found at slot", rpcErr.Err.Error())
})
t.Run("genesis block not found returns 404", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
}
// Note: genesis blocks don't support blobs, so this returns BadRequest
_, rpcErr := blocker.Blobs(ctx, "genesis")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
require.StringContains(t, "not supported for Phase 0", rpcErr.Err.Error())
})
t.Run("finalized block not found returns 404", func(t *testing.T) {
// Set up a finalized checkpoint pointing to a non-existent block
nonExistentRoot := bytesutil.PadTo([]byte("nonexistent"), 32)
blocker := &BeaconDbBlocker{
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Root: nonExistentRoot},
},
}
_, rpcErr := blocker.Blobs(ctx, "finalized")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
require.StringContains(t, "finalized block", rpcErr.Err.Error())
require.StringContains(t, "not found", rpcErr.Err.Error())
})
t.Run("justified block not found returns 404", func(t *testing.T) {
// Set up a justified checkpoint pointing to a non-existent block
nonExistentRoot := bytesutil.PadTo([]byte("nonexistent2"), 32)
blocker := &BeaconDbBlocker{
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{
CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: nonExistentRoot},
},
}
_, rpcErr := blocker.Blobs(ctx, "justified")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
require.StringContains(t, "justified block", rpcErr.Err.Error())
require.StringContains(t, "not found", rpcErr.Err.Error())
})
t.Run("invalid block ID returns 400", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
}
_, rpcErr := blocker.Blobs(ctx, "invalid-hex")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
require.StringContains(t, "could not parse block ID", rpcErr.Err.Error())
})
t.Run("database error returns 500", func(t *testing.T) {
// Create a pre-Deneb block with valid slot
predenebBlock := util.NewBeaconBlock()
predenebBlock.Block.Slot = 100
util.SaveBlock(t, ctx, db, predenebBlock)
// Create blocker without ChainInfoFetcher to trigger internal error when checking canonical status
blocker := &BeaconDbBlocker{
BeaconDB: db,
}
_, rpcErr := blocker.Blobs(ctx, "100")
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.Internal), rpcErr.Reason)
})
}
func TestGetBlob(t *testing.T) {
const (
slot = 123
@@ -217,9 +329,9 @@ func TestGetBlob(t *testing.T) {
for _, blob := range fuluBlobSidecars {
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob.Blob)
cellsAndProogs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
require.NoError(t, err)
cellsAndProofsList = append(cellsAndProofsList, cellsAndProogs)
cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
}
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
@@ -240,14 +352,17 @@ func TestGetBlob(t *testing.T) {
blocker := &BeaconDbBlocker{}
_, rpcErr := blocker.Blobs(ctx, "genesis")
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
require.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
require.StringContains(t, "not supported for Phase 0 fork", rpcErr.Err.Error())
})
t.Run("head", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{Root: denebBlockRoot[:]},
ChainInfoFetcher: &mockChain.ChainService{
Root: denebBlockRoot[:],
Block: denebBlock,
},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
@@ -326,6 +441,7 @@ func TestGetBlob(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
@@ -491,6 +607,31 @@ func TestGetBlob(t *testing.T) {
require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
}
})
t.Run("pre-deneb block should return 400", func(t *testing.T) {
// Setup with Deneb fork at epoch 1, so slot 0 is before Deneb
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
params.OverrideBeaconConfig(cfg)
// Create a pre-Deneb block (slot 0, which is epoch 0)
predenebBlock := util.NewBeaconBlock()
predenebBlock.Block.Slot = 0
util.SaveBlock(t, ctx, db, predenebBlock)
predenebBlockRoot, err := predenebBlock.Block.HashTreeRoot()
require.NoError(t, err)
blocker := &BeaconDbBlocker{
BeaconDB: db,
}
_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(predenebBlockRoot[:]))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
require.StringContains(t, "not supported before", rpcErr.Err.Error())
})
}
func TestBlobs_CommitmentOrdering(t *testing.T) {
@@ -653,3 +794,300 @@ func TestBlobs_CommitmentOrdering(t *testing.T) {
require.StringContains(t, "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", rpcErr.Err.Error())
})
}
func TestGetDataColumns(t *testing.T) {
const (
blobCount = 4
fuluForkEpoch = 2
)
setupFulu := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
cfg.FuluForkEpoch = fuluForkEpoch
params.OverrideBeaconConfig(cfg)
}
setupPreFulu := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
cfg.FuluForkEpoch = 1000 // Set to a high epoch to ensure we're before Fulu
params.OverrideBeaconConfig(cfg)
}
ctx := t.Context()
db := testDB.SetupDB(t)
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Create Fulu block and convert blob sidecars to data column sidecars.
fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
fuluBlockRoot := fuluBlock.Root()
cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
for _, blob := range fuluBlobSidecars {
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob.Blob)
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
require.NoError(t, err)
cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
}
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
require.NoError(t, err)
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
for _, roDataColumn := range roDataColumnSidecars {
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
}
err = db.SaveBlock(t.Context(), fuluBlock)
require.NoError(t, err)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
t.Run("pre-fulu fork", func(t *testing.T) {
setupPreFulu(t)
// Create a block at slot 123 (before the Fulu fork, since FuluForkEpoch is set to a far-future epoch)
preFuluBlock := util.NewBeaconBlock()
preFuluBlock.Block.Slot = 123
util.SaveBlock(t, ctx, db, preFuluBlock)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
ChainInfoFetcher: &mockChain.ChainService{},
BeaconDB: db,
}
_, rpcErr := blocker.DataColumns(ctx, "123", nil)
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
require.StringContains(t, "not supported before Fulu fork", rpcErr.Err.Error())
})
t.Run("genesis", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
ChainInfoFetcher: &mockChain.ChainService{},
}
_, rpcErr := blocker.DataColumns(ctx, "genesis", nil)
require.NotNil(t, rpcErr)
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
require.StringContains(t, "not supported for Phase 0 fork", rpcErr.Err.Error())
})
t.Run("head", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{
Root: fuluBlockRoot[:],
Block: fuluBlock,
},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, "head", nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
// Create a map of expected indices for easier verification
expectedIndices := make(map[uint64]bool)
for _, expected := range verifiedRoDataColumnSidecars {
expectedIndices[expected.RODataColumn.DataColumnSidecar.Index] = true
}
// Verify we got data columns with the expected indices
for _, actual := range retrievedDataColumns {
require.Equal(t, true, expectedIndices[actual.RODataColumn.DataColumnSidecar.Index])
}
})
t.Run("finalized", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: fuluBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, "finalized", nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
})
t.Run("justified", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: fuluBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, "justified", nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
})
t.Run("root", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
})
t.Run("slot", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
ChainInfoFetcher: &mockChain.ChainService{},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
slotStr := fmt.Sprintf("%d", fuluForkSlot)
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, slotStr, nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(verifiedRoDataColumnSidecars), len(retrievedDataColumns))
})
t.Run("specific indices", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
// Request specific indices (first 3 data columns)
indices := []int{0, 1, 2}
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), indices)
require.IsNil(t, rpcErr)
require.Equal(t, 3, len(retrievedDataColumns))
for i, dataColumn := range retrievedDataColumns {
require.Equal(t, uint64(indices[i]), dataColumn.RODataColumn.DataColumnSidecar.Index)
}
})
t.Run("no data columns returns empty array", func(t *testing.T) {
setupFulu(t)
_, emptyDataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: emptyDataColumnStorage,
}
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(retrievedDataColumns))
})
t.Run("index too big", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
_, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), []int{0, math.MaxInt})
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
})
t.Run("outside retention period", func(t *testing.T) {
setupFulu(t)
// Create a data column storage with a very short retention period
shortRetentionStorage, err := filesystem.NewDataColumnStorage(ctx,
filesystem.WithDataColumnBasePath(t.TempDir()),
filesystem.WithDataColumnRetentionEpochs(1), // Only 1 epoch retention
)
require.NoError(t, err)
// Mock genesis time to make the current slot much later than the block slot.
// This simulates being outside the retention period.
genesisTime := time.Now().Add(-time.Duration(fuluForkSlot+1000) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: genesisTime,
},
BeaconDB: db,
DataColumnStorage: shortRetentionStorage,
}
// Since the block is outside retention period, should return empty array
retrievedDataColumns, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(retrievedDataColumns))
})
t.Run("block not found", func(t *testing.T) {
setupFulu(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
DataColumnStorage: dataColumnStorage,
}
nonExistentRoot := bytesutil.PadTo([]byte("nonexistent"), 32)
_, rpcErr := blocker.DataColumns(ctx, hexutil.Encode(nonExistentRoot), nil)
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})
}
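The "outside retention period" case above comes down to simple slot arithmetic; the following standalone sketch spells it out. The timing constants (32 slots per epoch, 12 seconds per slot) and the pruning rule are illustrative assumptions, not Prysm code.

package main

import (
	"fmt"
	"time"
)

// Illustrative retention arithmetic (assumed values): shifting genesis
// (fuluForkSlot+1000) slots into the past makes the current slot far newer
// than the stored block, so a 1-epoch retention window skips it.
func main() {
	const (
		slotsPerEpoch   = 32
		secondsPerSlot  = 12
		fuluForkSlot    = 2 * slotsPerEpoch // slot of the stored block
		retentionEpochs = 1
	)

	now := time.Now()
	genesis := now.Add(-time.Duration(fuluForkSlot+1000) * secondsPerSlot * time.Second)

	currentSlot := int(now.Sub(genesis).Seconds()) / secondsPerSlot // ~ fuluForkSlot + 1000
	oldestRetainedSlot := currentSlot - retentionEpochs*slotsPerEpoch

	if fuluForkSlot < oldestRetainedSlot {
		fmt.Printf("block slot %d < oldest retained slot %d -> empty response\n", fuluForkSlot, oldestRetainedSlot)
	}
}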

View File

@@ -324,18 +324,18 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
wg.Add(1)
go func() {
defer wg.Done()
if err := vs.broadcastReceiveBlock(ctx, block, root); err != nil {
if err := vs.broadcastReceiveBlock(ctx, &wg, block, root); err != nil {
errChan <- errors.Wrap(err, "broadcast/receive block failed")
return
}
errChan <- nil
}()
wg.Wait()
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
}
wg.Wait()
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
}
@@ -432,7 +432,26 @@ func (vs *Server) handleUnblindedBlock(
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {
return errors.Wrap(err, "broadcast block")
}
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
})
if err := vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil); err != nil {
return errors.Wrap(err, "receive block")
}
return nil
}
func (vs *Server) broadcastBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
defer wg.Done()
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -440,11 +459,13 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
if err := vs.P2P.Broadcast(ctx, protoBlock); err != nil {
return errors.Wrap(err, "broadcast failed")
}
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
})
return vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil)
log.WithFields(logrus.Fields{
"slot": block.Block().Slot(),
"root": fmt.Sprintf("%#x", root),
}).Debug("Broadcasted block")
return nil
}
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
@@ -498,10 +519,6 @@ func (vs *Server) broadcastAndReceiveDataColumns(
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
}
@@ -512,6 +529,11 @@ func (vs *Server) broadcastAndReceiveDataColumns(
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}
return nil
}
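The reordered proposal flow above (block first, then sidecars, with block import finishing in the background) boils down to a WaitGroup handoff. Here is a minimal standalone sketch of that pattern; the print statements are stand-ins for the broadcast and import steps, not the actual server code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// Minimal sketch of the proposal ordering: signal the WaitGroup once the block
// is broadcast, let the caller publish sidecars, and finish block import
// asynchronously, surfacing its error through a channel at the end.
func main() {
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Add(1)
	go func() {
		func() {
			defer wg.Done() // released as soon as the block hits the wire
			fmt.Println("block broadcast")
		}()
		time.Sleep(10 * time.Millisecond) // stand-in for block import / fork choice
		fmt.Println("block received")
		errChan <- nil
	}()

	wg.Wait() // block is out; sidecars may now follow
	fmt.Println("sidecars broadcast and received")

	if err := <-errChan; err != nil {
		fmt.Println("block broadcast/receive failed:", err)
	}
}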

View File

@@ -14,10 +14,13 @@ import (
// MockBlocker is a fake implementation of lookup.Blocker.
type MockBlocker struct {
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
ErrorToReturn error
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
ErrorToReturn error
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
DataColumnsFunc func(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
DataColumnsToReturn []blocks.VerifiedRODataColumn
DataColumnsErrorToReturn *core.RpcError
}
// Block --
@@ -40,3 +43,17 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
func (*MockBlocker) Blobs(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
return nil, &core.RpcError{}
}
// DataColumns --
func (m *MockBlocker) DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
if m.DataColumnsFunc != nil {
return m.DataColumnsFunc(ctx, id, indices)
}
if m.DataColumnsErrorToReturn != nil {
return nil, m.DataColumnsErrorToReturn
}
if m.DataColumnsToReturn != nil {
return m.DataColumnsToReturn, nil
}
return nil, &core.RpcError{}
}

View File

@@ -0,0 +1,3 @@
### Added
- Added `/eth/v1/debug/beacon/data_column_sidecars/{block_id}` endpoint.

View File

@@ -0,0 +1,3 @@
### Fixed
- Fixed missing 'justified' block support in blocker.Block and optimized logic shared between blocker.Block and blocker.Blobs.

View File

@@ -0,0 +1,4 @@
### Changed
- Broadcast the block first and then the sidecars, instead of broadcasting block and sidecars concurrently.
- Broadcast and receive sidecars concurrently, instead of sequentially.

View File

@@ -0,0 +1,2 @@
### Added
- Ability to create a new genesis block from a Fulu state.

View File

@@ -0,0 +1,3 @@
### Fixed
- Fixed prysmctl panic when `baseFee` is not set in genesis.json.

View File

@@ -0,0 +1,3 @@
### Fixed
- Fixed `getStateRandao` not returning historic RANDAO mix values.

View File

@@ -0,0 +1,3 @@
### Added
- SSZ-QL: Handle `Bitlist` and `Bitvector` types.

View File

@@ -34,7 +34,11 @@ go_test(
deps = [
"//crypto/bls:go_default_library",
"//runtime/interop:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//core:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library",
],
)

View File

@@ -279,6 +279,14 @@ func generateGenesis(ctx context.Context) (state.BeaconState, error) {
if v > version.Altair {
// set ttd to zero so EL goes post-merge immediately
gen.Config.TerminalTotalDifficulty = big.NewInt(0)
if gen.BaseFee == nil {
return nil, errors.New("baseFeePerGas must be set in genesis.json for Post-Merge networks (after Altair)")
}
} else {
if gen.BaseFee == nil {
gen.BaseFee = big.NewInt(1000000000) // 1 Gwei default
log.WithField("baseFeePerGas", "1000000000").Warn("BaseFeePerGas not specified in genesis.json, using default value of 1 Gwei")
}
}
} else {
gen = interop.GethTestnetGenesis(time.Unix(int64(f.GenesisTime), 0), params.BeaconConfig())

View File

@@ -1,14 +1,21 @@
package testnet
import (
"context"
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/runtime/interop"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
func Test_genesisStateFromJSONValidators(t *testing.T) {
@@ -48,3 +55,76 @@ func createGenesisDepositData(t *testing.T, numKeys int) []*depositDataJSON {
}
return jsonData
}
func Test_generateGenesis_BaseFeeValidation(t *testing.T) {
tests := []struct {
name string
forkVersion int
baseFee *big.Int
expectError bool
errorMsg string
}{
{
name: "Pre-merge Altair network without baseFee - should use default",
forkVersion: version.Altair,
baseFee: nil,
expectError: false,
},
{
name: "Post-merge Bellatrix network without baseFee - should error",
forkVersion: version.Bellatrix,
baseFee: nil,
expectError: true,
errorMsg: "baseFeePerGas must be set in genesis.json for Post-Merge networks (after Altair)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Save original flags
originalFlags := generateGenesisStateFlags
defer func() {
generateGenesisStateFlags = originalFlags
}()
// Set up test flags
generateGenesisStateFlags.NumValidators = 2
generateGenesisStateFlags.GenesisTime = 1609459200
generateGenesisStateFlags.ForkName = version.String(tt.forkVersion)
// Create a minimal genesis JSON for testing
genesis := &core.Genesis{
BaseFee: tt.baseFee,
Difficulty: big.NewInt(0),
GasLimit: 15000000,
Alloc: types.GenesisAlloc{},
Config: &params.ChainConfig{
ChainID: big.NewInt(32382),
},
}
// Create temporary genesis JSON file
genesisJSON, err := json.Marshal(genesis)
require.NoError(t, err)
tmpFile := t.TempDir() + "/genesis.json"
err = writeFile(tmpFile, genesisJSON)
require.NoError(t, err)
generateGenesisStateFlags.GethGenesisJsonIn = tmpFile
ctx := context.Background()
_, err = generateGenesis(ctx)
if tt.expectError {
require.ErrorContains(t, tt.errorMsg, err)
} else {
require.NoError(t, err)
}
})
}
}
func writeFile(path string, data []byte) error {
return os.WriteFile(path, data, 0644)
}

View File

@@ -4,6 +4,8 @@ go_library(
name = "go_default_library",
srcs = [
"analyzer.go",
"bitlist.go",
"bitvector.go",
"container.go",
"list.go",
"path.go",
@@ -15,6 +17,7 @@ go_library(
],
importpath = "github.com/OffchainLabs/prysm/v6/encoding/ssz/query",
visibility = ["//visibility:public"],
deps = ["@com_github_prysmaticlabs_go_bitfield//:go_default_library"],
)
go_test(
@@ -30,5 +33,6 @@ go_test(
"//encoding/ssz/query/testutil:go_default_library",
"//proto/ssz_query:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -67,6 +67,25 @@ func PopulateVariableLengthInfo(sszInfo *sszInfo, value any) error {
}
return nil
// In the Bitlist case, we have to set the actual runtime length of the bitlist.
case Bitlist:
bitlistInfo, err := sszInfo.BitlistInfo()
if err != nil {
return fmt.Errorf("could not get bitlist info: %w", err)
}
if bitlistInfo == nil {
return errors.New("bitlistInfo is nil")
}
val := reflect.ValueOf(value)
if err := bitlistInfo.SetLengthFromBytes(val.Bytes()); err != nil {
return fmt.Errorf("could not set bitlist length from bytes: %w", err)
}
return nil
// In Container case, we need to recursively populate variable-sized fields.
case Container:
containerInfo, err := sszInfo.ContainerInfo()
@@ -194,7 +213,7 @@ func analyzeHomogeneousColType(typ reflect.Type, tag *reflect.StructTag) (*sszIn
return nil, fmt.Errorf("could not get list limit: %w", err)
}
return analyzeListType(typ, elementInfo, limit)
return analyzeListType(typ, elementInfo, limit, sszDimension.isBitfield)
}
// 2. Handle Vector/Bitvector type
@@ -204,7 +223,7 @@ func analyzeHomogeneousColType(typ reflect.Type, tag *reflect.StructTag) (*sszIn
return nil, fmt.Errorf("could not get vector length: %w", err)
}
return analyzeVectorType(typ, elementInfo, length)
return analyzeVectorType(typ, elementInfo, length, sszDimension.isBitfield)
}
// Parsing ssz tag doesn't provide enough information to determine the collection type,
@@ -212,8 +231,22 @@ func analyzeHomogeneousColType(typ reflect.Type, tag *reflect.StructTag) (*sszIn
return nil, errors.New("could not determine collection type from tags")
}
// analyzeListType analyzes SSZ List type and returns its SSZ info.
func analyzeListType(typ reflect.Type, elementInfo *sszInfo, limit uint64) (*sszInfo, error) {
// analyzeListType analyzes SSZ List/Bitlist type and returns its SSZ info.
func analyzeListType(typ reflect.Type, elementInfo *sszInfo, limit uint64, isBitfield bool) (*sszInfo, error) {
if isBitfield {
return &sszInfo{
sszType: Bitlist,
typ: typ,
fixedSize: offsetBytes,
isVariable: true,
bitlistInfo: &bitlistInfo{
limit: limit,
},
}, nil
}
if elementInfo == nil {
return nil, errors.New("element info is required for List")
}
@@ -232,10 +265,25 @@ func analyzeListType(typ reflect.Type, elementInfo *sszInfo, limit uint64) (*ssz
}, nil
}
// analyzeVectorType analyzes SSZ Vector type and returns its SSZ info.
func analyzeVectorType(typ reflect.Type, elementInfo *sszInfo, length uint64) (*sszInfo, error) {
// analyzeVectorType analyzes SSZ Vector/Bitvector type and returns its SSZ info.
func analyzeVectorType(typ reflect.Type, elementInfo *sszInfo, length uint64, isBitfield bool) (*sszInfo, error) {
if isBitfield {
return &sszInfo{
sszType: Bitvector,
typ: typ,
// Size in bytes
fixedSize: length,
isVariable: false,
bitvectorInfo: &bitvectorInfo{
length: length * 8, // length in bits
},
}, nil
}
if elementInfo == nil {
return nil, errors.New("element info is required for Vector")
return nil, errors.New("element info is required for Vector/Bitvector")
}
// Validate the given length.

View File

@@ -13,5 +13,5 @@ func TestAnalyzeSSZInfo(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, info, "Expected non-nil SSZ info")
require.Equal(t, uint64(493), info.FixedSize(), "Expected fixed size to be 333")
require.Equal(t, uint64(565), info.FixedSize())
}

View File

@@ -0,0 +1,65 @@
package query
import (
"errors"
"fmt"
"github.com/prysmaticlabs/go-bitfield"
)
// bitlistInfo holds information about an SSZ Bitlist type.
//
// Same as listInfo, but limit/length are in bits, not elements.
type bitlistInfo struct {
// limit is the maximum number of bits in the bitlist.
limit uint64
// length is the actual number of bits at runtime (0 if not set).
length uint64
}
func (l *bitlistInfo) Limit() uint64 {
if l == nil {
return 0
}
return l.limit
}
func (l *bitlistInfo) Length() uint64 {
if l == nil {
return 0
}
return l.length
}
func (l *bitlistInfo) SetLength(length uint64) error {
if l == nil {
return errors.New("bitlistInfo is nil")
}
if length > l.limit {
return fmt.Errorf("length %d exceeds limit %d", length, l.limit)
}
l.length = length
return nil
}
// SetLengthFromBytes determines the actual bitlist length from SSZ-encoded bytes.
func (l *bitlistInfo) SetLengthFromBytes(rawBytes []byte) error {
// Wrap rawBytes in a Bitlist to use existing methods.
bl := bitfield.Bitlist(rawBytes)
return l.SetLength(bl.Len())
}
// Size returns the size in bytes for this bitlist.
// Note that while serializing, 1 bit is added for the delimiter bit,
// which results in ceil((length + 1) / 8) bytes.
// Note that `(length / 8) + 1` is equivalent to `ceil((length + 1) / 8)`.
// Example: length=0 -> size=1, length=7 -> size=1, length=8 -> size=2
// Reference: https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#bitlistn-progressivebitlist
func (l *bitlistInfo) Size() uint64 {
if l == nil {
return 0
}
return (l.length / 8) + 1
}
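A quick standalone check of the size identity noted above; this is plain arithmetic only, independent of this package.

package main

import "fmt"

// Verifies (length/8)+1 == ceil((length+1)/8) for a handful of bit lengths.
func main() {
	for _, length := range []uint64{0, 1, 7, 8, 9, 15, 16, 255, 256} {
		size := (length / 8) + 1
		ceil := (length + 1 + 7) / 8
		fmt.Printf("length=%3d bits -> size=%d bytes (ceil=%d)\n", length, size, ceil)
	}
}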

View File

@@ -0,0 +1,15 @@
package query
// bitvectorInfo holds information about an SSZ Bitvector type.
type bitvectorInfo struct {
// length is the fixed number of bits of the Bitvector.
length uint64
}
func (v *bitvectorInfo) Length() uint64 {
if v == nil {
return 0
}
return v.length
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/ssz/query/testutil"
sszquerypb "github.com/OffchainLabs/prysm/v6/proto/ssz_query"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/prysmaticlabs/go-bitfield"
)
func TestCalculateOffsetAndLength(t *testing.T) {
@@ -80,11 +81,24 @@ func TestCalculateOffsetAndLength(t *testing.T) {
expectedOffset: 277,
expectedLength: 160, // 5 * 32 bytes
},
// Bitvector fields
{
name: "bitvector64_field",
path: ".bitvector64_field",
expectedOffset: 437,
expectedLength: 8,
},
{
name: "bitvector512_field",
path: ".bitvector512_field",
expectedOffset: 445,
expectedLength: 64,
},
// Trailing field
{
name: "trailing_field",
path: ".trailing_field",
expectedOffset: 437,
expectedOffset: 509,
expectedLength: 56,
},
}
@@ -119,26 +133,26 @@ func TestCalculateOffsetAndLength(t *testing.T) {
{
name: "field_list_uint64",
path: ".field_list_uint64",
expectedOffset: 104, // First part of variable-sized type.
expectedOffset: 108, // First part of variable-sized type.
expectedLength: 40, // 5 elements * uint64 (8 bytes each)
},
{
name: "field_list_container",
path: ".field_list_container",
expectedOffset: 144, // Second part of variable-sized type.
expectedOffset: 148, // Second part of variable-sized type.
expectedLength: 120, // 3 elements * FixedNestedContainer (40 bytes each)
},
{
name: "field_list_bytes32",
path: ".field_list_bytes32",
expectedOffset: 264,
expectedOffset: 268,
expectedLength: 96, // 3 elements * 32 bytes each
},
// Nested paths
{
name: "nested",
path: ".nested",
expectedOffset: 360,
expectedOffset: 364,
// Calculated with:
// - Value1: 8 bytes
// - field_list_uint64 offset: 4 bytes
@@ -148,20 +162,27 @@ func TestCalculateOffsetAndLength(t *testing.T) {
{
name: "nested.value1",
path: ".nested.value1",
expectedOffset: 360,
expectedOffset: 364,
expectedLength: 8,
},
{
name: "nested.field_list_uint64",
path: ".nested.field_list_uint64",
expectedOffset: 372,
expectedOffset: 376,
expectedLength: 40,
},
// Bitlist field
{
name: "bitlist_field",
path: ".bitlist_field",
expectedOffset: 416,
expectedLength: 33, // 32 bytes + 1 byte for length delimiter
},
// Fixed trailing field
{
name: "trailing_field",
path: ".trailing_field",
expectedOffset: 48, // After leading_field + 4 offset pointers
expectedOffset: 52, // After leading_field + 5 offset pointers
expectedLength: 56,
},
}
@@ -208,6 +229,16 @@ func createFixedTestContainer() *sszquerypb.FixedTestContainer {
nestedValue2[i] = byte(i + 56)
}
bitvector64 := bitfield.NewBitvector64()
for i := range bitvector64 {
bitvector64[i] = 0x42
}
bitvector512 := bitfield.NewBitvector512()
for i := range bitvector512 {
bitvector512[i] = 0x24
}
trailingField := make([]byte, 56)
for i := range trailingField {
trailingField[i] = byte(i + 88)
@@ -240,6 +271,10 @@ func createFixedTestContainer() *sszquerypb.FixedTestContainer {
make([]byte, 32),
},
// Bitvector fields
Bitvector64Field: bitvector64,
Bitvector512Field: bitvector512,
// Trailing field
TrailingField: trailingField,
}
@@ -294,6 +329,15 @@ func getFixedTestContainerSpec() testutil.TestSpec {
Path: ".two_dimension_bytes_field",
Expected: testContainer.TwoDimensionBytesField,
},
// Bitvector fields
{
Path: ".bitvector64_field",
Expected: testContainer.Bitvector64Field,
},
{
Path: ".bitvector512_field",
Expected: testContainer.Bitvector512Field,
},
// Trailing field
{
Path: ".trailing_field",
@@ -326,6 +370,13 @@ func createVariableTestContainer() *sszquerypb.VariableTestContainer {
}
}
bitlistField := bitfield.NewBitlist(256)
bitlistField.SetBitAt(0, true)
bitlistField.SetBitAt(10, true)
bitlistField.SetBitAt(50, true)
bitlistField.SetBitAt(100, true)
bitlistField.SetBitAt(255, true)
return &sszquerypb.VariableTestContainer{
// Fixed leading field
LeadingField: leadingField,
@@ -345,6 +396,9 @@ func createVariableTestContainer() *sszquerypb.VariableTestContainer {
FieldListUint64: []uint64{1, 2, 3, 4, 5},
},
// Bitlist field
BitlistField: bitlistField,
// Fixed trailing field
TrailingField: trailingField,
}
@@ -391,6 +445,11 @@ func getVariableTestContainerSpec() testutil.TestSpec {
Path: ".nested.field_list_uint64",
Expected: testContainer.Nested.FieldListUint64,
},
// Bitlist field
{
Path: ".bitlist_field",
Expected: testContainer.BitlistField,
},
// Fixed trailing field
{
Path: ".trailing_field",

View File

@@ -27,6 +27,12 @@ type sszInfo struct {
// For Vector types.
vectorInfo *vectorInfo
// For Bitlist types.
bitlistInfo *bitlistInfo
// For Bitvector types.
bitvectorInfo *bitvectorInfo
}
func (info *sszInfo) FixedSize() uint64 {
@@ -53,6 +59,9 @@ func (info *sszInfo) Size() uint64 {
return length * elementSize
case Bitlist:
return info.bitlistInfo.Size()
case Container:
size := info.fixedSize
for _, fieldInfo := range info.containerInfo.fields {
@@ -65,7 +74,6 @@ func (info *sszInfo) Size() uint64 {
return size
default:
// NOTE: Handle other variable-sized types.
return 0
}
}
@@ -110,6 +118,30 @@ func (info *sszInfo) VectorInfo() (*vectorInfo, error) {
return info.vectorInfo, nil
}
func (info *sszInfo) BitlistInfo() (*bitlistInfo, error) {
if info == nil {
return nil, errors.New("sszInfo is nil")
}
if info.sszType != Bitlist {
return nil, fmt.Errorf("sszInfo is not a Bitlist type, got %s", info.sszType)
}
return info.bitlistInfo, nil
}
func (info *sszInfo) BitvectorInfo() (*bitvectorInfo, error) {
if info == nil {
return nil, errors.New("sszInfo is nil")
}
if info.sszType != Bitvector {
return nil, fmt.Errorf("sszInfo is not a Bitvector type, got %s", info.sszType)
}
return info.bitvectorInfo, nil
}
// String implements the Stringer interface for sszInfo.
// This follows the notation used in the consensus specs.
func (info *sszInfo) String() string {
@@ -128,6 +160,10 @@ func (info *sszInfo) String() string {
return fmt.Sprintf("Bytes%d", info.vectorInfo.length)
}
return fmt.Sprintf("Vector[%s, %d]", info.vectorInfo.element, info.vectorInfo.length)
case Bitlist:
return fmt.Sprintf("Bitlist[%d]", info.bitlistInfo.limit)
case Bitvector:
return fmt.Sprintf("Bitvector[%d]", info.bitvectorInfo.length)
default:
return info.typ.Name()
}
@@ -175,6 +211,9 @@ func printRecursive(info *sszInfo, builder *strings.Builder, prefix string) {
case List:
builder.WriteString(fmt.Sprintf("%s (%s / length: %d, size: %d)\n", info, sizeDesc, info.listInfo.length, info.Size()))
case Bitlist:
builder.WriteString(fmt.Sprintf("%s (%s / length (bit): %d, size: %d)\n", info, sizeDesc, info.bitlistInfo.length, info.Size()))
default:
builder.WriteString(fmt.Sprintf("%s (%s / size: %d)\n", info, sizeDesc, info.Size()))
}

View File

@@ -15,6 +15,10 @@ const (
// sszSizeTag specifies the length of a fixed-sized collection, like an SSZ Vector.
// A wildcard ('?') indicates that the dimension is variable-sized (a List).
sszSizeTag = "ssz-size"
// castTypeTag specifies custom type-casting instructions,
// e.g. "github.com/prysmaticlabs/go-bitfield.Bitlist".
castTypeTag = "cast-type"
)
// SSZDimension holds parsed SSZ tag information for current dimension.
@@ -22,6 +26,9 @@ const (
type SSZDimension struct {
vectorLength *uint64
listLimit *uint64
// isBitfield indicates if the dimension represents a bitfield type (Bitlist, Bitvector).
isBitfield bool
}
// ParseSSZTag parses SSZ-specific tags (like `ssz-max` and `ssz-size`)
@@ -34,6 +41,11 @@ func ParseSSZTag(tag *reflect.StructTag) (*SSZDimension, *reflect.StructTag, err
var newTagParts []string
var sizeStr, maxStr string
var isBitfield bool
if castType := tag.Get(castTypeTag); strings.Contains(castType, "go-bitfield") {
isBitfield = true
}
// Parse ssz-size tag
if sszSize := tag.Get(sszSizeTag); sszSize != "" {
@@ -88,7 +100,7 @@ func ParseSSZTag(tag *reflect.StructTag) (*SSZDimension, *reflect.StructTag, err
return nil, nil, errors.New("ssz-max must be greater than 0")
}
return &SSZDimension{listLimit: &limit}, newTag, nil
return &SSZDimension{listLimit: &limit, isBitfield: isBitfield}, newTag, nil
}
// 2. If ssz-size is specified, it must be a vector.
@@ -100,7 +112,7 @@ func ParseSSZTag(tag *reflect.StructTag) (*SSZDimension, *reflect.StructTag, err
return nil, nil, errors.New("ssz-size must be greater than 0")
}
return &SSZDimension{vectorLength: &length}, newTag, nil
return &SSZDimension{vectorLength: &length, isBitfield: isBitfield}, newTag, nil
}
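For illustration, the bitfield detection relies only on struct tags. Below is a minimal standalone sketch reproducing the same cast-type check outside this package; the container and field names are made up, and the tag layout mirrors the generated protobuf test types.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Hypothetical container whose tags mirror the generated test protos:
// cast-type names the go-bitfield type, ssz-size/ssz-max carry the bounds.
type example struct {
	Bitvector64Field []byte `ssz-size:"8" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector64"`
	BitlistField     []byte `ssz-max:"2048" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitlist"`
	FieldBytes32     []byte `ssz-size:"32"`
}

func main() {
	typ := reflect.TypeOf(example{})
	for i := 0; i < typ.NumField(); i++ {
		field := typ.Field(i)
		// Same check as the parser above: any cast-type pointing at go-bitfield is a bitfield.
		isBitfield := strings.Contains(field.Tag.Get("cast-type"), "go-bitfield")
		fmt.Printf("%-17s isBitfield=%t\n", field.Name, isBitfield)
	}
}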
// IsVector returns true if this dimension represents a vector.

View File

@@ -14,5 +14,6 @@ go_library(
"//encoding/ssz/query:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -3,8 +3,10 @@ package testutil
import (
"fmt"
"reflect"
"strings"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
)
// marshalAny marshals any value into SSZ format.
@@ -20,7 +22,25 @@ func marshalAny(value any) ([]byte, error) {
}
// Handle custom type aliases by checking if they're based on primitive types
if valueType.PkgPath() != "" {
if pkgPath := valueType.PkgPath(); pkgPath != "" {
// Special handling for bitfield types.
if strings.Contains(pkgPath, "go-bitfield") {
// Check if it's a Bitlist (variable-length) that needs SSZ encoding
if bl, ok := value.(bitfield.Bitlist); ok {
// The Bitlist type already stores the SSZ delimiter bit in its internal representation:
// the raw []byte carries the delimiter as the most significant set bit of the last byte.
// So we just return the raw bytes directly for SSZ encoding.
return []byte(bl), nil
}
// For other bitfield types (Bitvector), just return the bytes
if bitfield, ok := value.(bitfield.Bitfield); ok {
return bitfield.Bytes(), nil
}
return nil, fmt.Errorf("expected bitfield type, got %T", value)
}
switch valueType.Kind() {
case reflect.Uint64:
return ssz.MarshalUint64(make([]byte, 0), reflect.ValueOf(value).Uint()), nil

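The delimiter-bit distinction the comments above rely on can be seen directly with go-bitfield; this is a small standalone sketch assuming only the public go-bitfield API.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// Shows why the raw bytes (with the length/delimiter bit) are what SSZ wants,
// while Bytes() strips that bit.
func main() {
	bl := bitfield.NewBitlist(8) // 8-bit bitlist, all zeros, delimiter bit set
	bl.SetBitAt(0, true)

	fmt.Printf("raw:   %#x\n", []byte(bl)) // 0x0101 - data byte, then the delimiter bit in the final byte
	fmt.Printf("bytes: %#x\n", bl.Bytes()) // 0x01   - delimiter bit removed
	fmt.Println("bits:", bl.Len())         // 8
}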
View File

@@ -25,6 +25,7 @@ go_proto_library(
visibility = ["//visibility:public"],
deps = [
"//proto/eth/ext:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
"@org_golang_google_protobuf//runtime/protoimpl:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
sync "sync"
_ "github.com/OffchainLabs/prysm/v6/proto/eth/ext"
github_com_prysmaticlabs_go_bitfield "github.com/prysmaticlabs/go-bitfield"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
@@ -82,14 +83,16 @@ type FixedTestContainer struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
FieldUint32 uint32 `protobuf:"varint,3,opt,name=field_uint32,json=fieldUint32,proto3" json:"field_uint32,omitempty"`
FieldUint64 uint64 `protobuf:"varint,4,opt,name=field_uint64,json=fieldUint64,proto3" json:"field_uint64,omitempty"`
FieldBool bool `protobuf:"varint,5,opt,name=field_bool,json=fieldBool,proto3" json:"field_bool,omitempty"`
FieldBytes32 []byte `protobuf:"bytes,8,opt,name=field_bytes32,json=fieldBytes32,proto3" json:"field_bytes32,omitempty" ssz-size:"32"`
Nested *FixedNestedContainer `protobuf:"bytes,9,opt,name=nested,proto3" json:"nested,omitempty"`
VectorField []uint64 `protobuf:"varint,10,rep,packed,name=vector_field,json=vectorField,proto3" json:"vector_field,omitempty" ssz-size:"24"`
TwoDimensionBytesField [][]byte `protobuf:"bytes,11,rep,name=two_dimension_bytes_field,json=twoDimensionBytesField,proto3" json:"two_dimension_bytes_field,omitempty" ssz-size:"5,32"`
TrailingField []byte `protobuf:"bytes,12,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
FieldUint32 uint32 `protobuf:"varint,1,opt,name=field_uint32,json=fieldUint32,proto3" json:"field_uint32,omitempty"`
FieldUint64 uint64 `protobuf:"varint,2,opt,name=field_uint64,json=fieldUint64,proto3" json:"field_uint64,omitempty"`
FieldBool bool `protobuf:"varint,3,opt,name=field_bool,json=fieldBool,proto3" json:"field_bool,omitempty"`
FieldBytes32 []byte `protobuf:"bytes,4,opt,name=field_bytes32,json=fieldBytes32,proto3" json:"field_bytes32,omitempty" ssz-size:"32"`
Nested *FixedNestedContainer `protobuf:"bytes,5,opt,name=nested,proto3" json:"nested,omitempty"`
VectorField []uint64 `protobuf:"varint,6,rep,packed,name=vector_field,json=vectorField,proto3" json:"vector_field,omitempty" ssz-size:"24"`
TwoDimensionBytesField [][]byte `protobuf:"bytes,7,rep,name=two_dimension_bytes_field,json=twoDimensionBytesField,proto3" json:"two_dimension_bytes_field,omitempty" ssz-size:"5,32"`
Bitvector64Field github_com_prysmaticlabs_go_bitfield.Bitvector64 `protobuf:"bytes,8,opt,name=bitvector64_field,json=bitvector64Field,proto3" json:"bitvector64_field,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector64" ssz-size:"8"`
Bitvector512Field github_com_prysmaticlabs_go_bitfield.Bitvector512 `protobuf:"bytes,9,opt,name=bitvector512_field,json=bitvector512Field,proto3" json:"bitvector512_field,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector512" ssz-size:"64"`
TrailingField []byte `protobuf:"bytes,10,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
}
func (x *FixedTestContainer) Reset() {
@@ -173,6 +176,20 @@ func (x *FixedTestContainer) GetTwoDimensionBytesField() [][]byte {
return nil
}
func (x *FixedTestContainer) GetBitvector64Field() github_com_prysmaticlabs_go_bitfield.Bitvector64 {
if x != nil {
return x.Bitvector64Field
}
return github_com_prysmaticlabs_go_bitfield.Bitvector64(nil)
}
func (x *FixedTestContainer) GetBitvector512Field() github_com_prysmaticlabs_go_bitfield.Bitvector512 {
if x != nil {
return x.Bitvector512Field
}
return github_com_prysmaticlabs_go_bitfield.Bitvector512(nil)
}
func (x *FixedTestContainer) GetTrailingField() []byte {
if x != nil {
return x.TrailingField
@@ -240,12 +257,13 @@ type VariableTestContainer struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
LeadingField []byte `protobuf:"bytes,1,opt,name=leading_field,json=leadingField,proto3" json:"leading_field,omitempty" ssz-size:"32"`
FieldListUint64 []uint64 `protobuf:"varint,2,rep,packed,name=field_list_uint64,json=fieldListUint64,proto3" json:"field_list_uint64,omitempty" ssz-max:"2048"`
FieldListContainer []*FixedNestedContainer `protobuf:"bytes,3,rep,name=field_list_container,json=fieldListContainer,proto3" json:"field_list_container,omitempty" ssz-max:"128"`
FieldListBytes32 [][]byte `protobuf:"bytes,4,rep,name=field_list_bytes32,json=fieldListBytes32,proto3" json:"field_list_bytes32,omitempty" ssz-max:"100" ssz-size:"?,32"`
Nested *VariableNestedContainer `protobuf:"bytes,5,opt,name=nested,proto3" json:"nested,omitempty"`
TrailingField []byte `protobuf:"bytes,6,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
LeadingField []byte `protobuf:"bytes,1,opt,name=leading_field,json=leadingField,proto3" json:"leading_field,omitempty" ssz-size:"32"`
FieldListUint64 []uint64 `protobuf:"varint,2,rep,packed,name=field_list_uint64,json=fieldListUint64,proto3" json:"field_list_uint64,omitempty" ssz-max:"2048"`
FieldListContainer []*FixedNestedContainer `protobuf:"bytes,3,rep,name=field_list_container,json=fieldListContainer,proto3" json:"field_list_container,omitempty" ssz-max:"128"`
FieldListBytes32 [][]byte `protobuf:"bytes,4,rep,name=field_list_bytes32,json=fieldListBytes32,proto3" json:"field_list_bytes32,omitempty" ssz-max:"100" ssz-size:"?,32"`
Nested *VariableNestedContainer `protobuf:"bytes,5,opt,name=nested,proto3" json:"nested,omitempty"`
BitlistField github_com_prysmaticlabs_go_bitfield.Bitlist `protobuf:"bytes,6,opt,name=bitlist_field,json=bitlistField,proto3" json:"bitlist_field,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitlist" ssz-max:"2048"`
TrailingField []byte `protobuf:"bytes,7,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
}
func (x *VariableTestContainer) Reset() {
@@ -315,6 +333,13 @@ func (x *VariableTestContainer) GetNested() *VariableNestedContainer {
return nil
}
func (x *VariableTestContainer) GetBitlistField() github_com_prysmaticlabs_go_bitfield.Bitlist {
if x != nil {
return x.BitlistField
}
return github_com_prysmaticlabs_go_bitfield.Bitlist(nil)
}
func (x *VariableTestContainer) GetTrailingField() []byte {
if x != nil {
return x.TrailingField
@@ -334,66 +359,85 @@ var file_proto_ssz_query_ssz_query_proto_rawDesc = []byte{
0x72, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28,
0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x31, 0x12, 0x1e, 0x0a, 0x06, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
0x32, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x69,
0x32, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0x22, 0xd2, 0x04, 0x0a, 0x12, 0x46, 0x69,
0x78, 0x65, 0x64, 0x54, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32,
0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x55, 0x69, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x55, 0x69, 0x6e,
0x74, 0x33, 0x32, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x75, 0x69, 0x6e,
0x74, 0x36, 0x34, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64,
0x74, 0x36, 0x34, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64,
0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c,
0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x42, 0x6f, 0x6f, 0x6c, 0x12, 0x2b, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62,
0x79, 0x74, 0x65, 0x73, 0x33, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5,
0x79, 0x74, 0x65, 0x73, 0x33, 0x32, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5,
0x18, 0x02, 0x33, 0x32, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73,
0x33, 0x32, 0x12, 0x37, 0x0a, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01,
0x33, 0x32, 0x12, 0x37, 0x0a, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46,
0x69, 0x78, 0x65, 0x64, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69,
0x6e, 0x65, 0x72, 0x52, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x0c, 0x76,
0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28,
0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28,
0x04, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x34, 0x52, 0x0b, 0x76, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x19, 0x74, 0x77, 0x6f, 0x5f, 0x64, 0x69,
0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x66, 0x69,
0x65, 0x6c, 0x64, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x35,
0x65, 0x6c, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x35,
0x2c, 0x33, 0x32, 0x52, 0x16, 0x74, 0x77, 0x6f, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f,
0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2d, 0x0a, 0x0e, 0x74,
0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x0c, 0x20,
0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52, 0x0d, 0x74, 0x72, 0x61,
0x69, 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x66, 0x0a, 0x17, 0x56, 0x61,
0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74,
0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x31, 0x18,
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x31, 0x12, 0x33, 0x0a,
0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x11, 0x62,
0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x39, 0x82, 0xb5, 0x18, 0x30, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63,
0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64,
0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18, 0x01,
0x38, 0x52, 0x10, 0x62, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x46, 0x69,
0x65, 0x6c, 0x64, 0x12, 0x6a, 0x0a, 0x12, 0x62, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72,
0x35, 0x31, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x42,
0x3b, 0x82, 0xb5, 0x18, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f,
0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63,
0x74, 0x6f, 0x72, 0x35, 0x31, 0x32, 0x8a, 0xb5, 0x18, 0x02, 0x36, 0x34, 0x52, 0x11, 0x62, 0x69,
0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x35, 0x31, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12,
0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52,
0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x66,
0x0a, 0x17, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64,
0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x31, 0x12, 0x33, 0x0a, 0x11, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f,
0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5,
0x18, 0x03, 0x31, 0x30, 0x30, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74,
0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x22, 0xdf, 0x03, 0x0a, 0x15, 0x56, 0x61, 0x72, 0x69, 0x61,
0x62, 0x6c, 0x65, 0x54, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x12, 0x2b, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52,
0x0c, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x34, 0x0a,
0x11, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x75, 0x69, 0x6e, 0x74,
0x36, 0x34, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x30,
0x30, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x69, 0x6e, 0x74,
0x36, 0x34, 0x22, 0x80, 0x03, 0x0a, 0x15, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x54,
0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x0d,
0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0c, 0x6c, 0x65, 0x61,
0x64, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x34, 0x0a, 0x11, 0x66, 0x69, 0x65,
0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x02,
0x20, 0x03, 0x28, 0x04, 0x42, 0x08, 0x92, 0xb5, 0x18, 0x04, 0x32, 0x30, 0x34, 0x38, 0x52, 0x0f,
0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12,
0x5a, 0x0a, 0x14, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e,
0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x42, 0x07,
0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69,
0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x12, 0x66,
0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x33,
0x32, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x0f, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x33,
0x32, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x30, 0x30, 0x52, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c,
0x69, 0x73, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x33, 0x32, 0x12, 0x3a, 0x0a, 0x06, 0x6e, 0x65,
0x73, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x73, 0x7a,
0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4e,
0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x06,
0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73,
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
0x36, 0x34, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x08, 0x92, 0xb5, 0x18, 0x04, 0x32, 0x30,
0x34, 0x38, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x69, 0x6e,
0x74, 0x36, 0x34, 0x12, 0x5a, 0x0a, 0x14, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73,
0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69,
0x78, 0x65, 0x64, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
0x65, 0x72, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x12, 0x66, 0x69, 0x65,
0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12,
0x3d, 0x0a, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x62, 0x79,
0x74, 0x65, 0x73, 0x33, 0x32, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x0f, 0x8a, 0xb5, 0x18,
0x04, 0x3f, 0x2c, 0x33, 0x32, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x30, 0x30, 0x52, 0x10, 0x66, 0x69,
0x65, 0x6c, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x33, 0x32, 0x12, 0x3a,
0x0a, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
0x2e, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61,
0x62, 0x6c, 0x65, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
0x65, 0x72, 0x52, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x0d, 0x62, 0x69,
0x74, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
0x0c, 0x42, 0x38, 0x82, 0xb5, 0x18, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f,
0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x6c,
0x69, 0x73, 0x74, 0x92, 0xb5, 0x18, 0x04, 0x32, 0x30, 0x34, 0x38, 0x52, 0x0c, 0x62, 0x69, 0x74,
0x6c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61,
0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28,
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c,
0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2f, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@@ -20,29 +20,42 @@ message FixedNestedContainer {
// FixedTestContainer - comprehensive fixed-size container for SSZ query testing
// Tests: All basic fixed-size SSZ types, nested containers, vectors, offset/length calculations
// Total size: 493 bytes (4+8+1+32+40+192+160+56)
// Total size: 565 bytes (4+8+1+32+40+192+160+8+64+56)
message FixedTestContainer {
// Basic integer types - test different integer sizes and their SSZ serialization
uint32 field_uint32 = 3; // Test: uint32 basic type, offset: 0
uint64 field_uint64 = 4; // Test: uint64 basic type, offset: 4
uint32 field_uint32 = 1; // Test: uint32 basic type, offset: 0
uint64 field_uint64 = 2; // Test: uint64 basic type, offset: 4
// Boolean type - test boolean serialization (1 byte in SSZ)
bool field_bool = 5; // Test: boolean basic type, offset: 12
bool field_bool = 3; // Test: boolean basic type, offset: 12
// Fixed-size bytes - test byte array
bytes field_bytes32 = 8 [ (ethereum.eth.ext.ssz_size) = "32" ]; // Test: 32-byte array, offset: 13
bytes field_bytes32 = 4 [ (ethereum.eth.ext.ssz_size) = "32" ]; // Test: 32-byte array, offset: 13
// Nested container - test container nesting and field access
FixedNestedContainer nested = 9; // Test: nested container navigation (8+32=40 bytes), offset: 45
FixedNestedContainer nested = 5; // Test: nested container navigation (8+32=40 bytes), offset: 45
// Vector type - test fixed-size array of basic elements
repeated uint64 vector_field = 10 [ (ethereum.eth.ext.ssz_size) = "24" ]; // Test: Vector[24] of uint64 (24*8=192 bytes), offset: 85
repeated uint64 vector_field = 6 [ (ethereum.eth.ext.ssz_size) = "24" ]; // Test: Vector[24] of uint64 (24*8=192 bytes), offset: 85
// 2D bytes type - test 2-dimensional byte arrays, common in blockchain state roots (e.g., beacon_state.block_roots)
repeated bytes two_dimension_bytes_field = 11 [ (ethereum.eth.ext.ssz_size) = "5,32" ]; // Test: Vector[Bytes32, 5] (32*5=160 bytes), offset: 277
repeated bytes two_dimension_bytes_field = 7 [ (ethereum.eth.ext.ssz_size) = "5,32" ]; // Test: Vector[Bytes32, 5] (32*5=160 bytes), offset: 277
// Bitvector types - test bitfield serialization
bytes bitvector64_field = 8 [
(ethereum.eth.ext.ssz_size) = "8",
(ethereum.eth.ext.cast_type) =
"github.com/prysmaticlabs/go-bitfield.Bitvector64"
]; // Test: Bitvector64 (8 bytes), offset: 437
bytes bitvector512_field = 9 [
(ethereum.eth.ext.ssz_size) = "64",
(ethereum.eth.ext.cast_type) =
"github.com/prysmaticlabs/go-bitfield.Bitvector512"
]; // Test: Bitvector512 (64 bytes), offset: 445
// Additional bytes field - test field ordering and offset calculation
bytes trailing_field = 12 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: trailing field after vector, offset: 437
bytes trailing_field = 10 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: trailing field after vector, offset: 509
}
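The schema comments above assert a specific byte offset for every fixed-size field. As a sanity check (not part of the change itself), the short standalone Go program below recomputes those offsets from the field sizes stated in the comments; the nested-container, vector, and 2D-bytes sizes are the 40, 192, and 160 bytes listed above.

// Illustrative sketch: recompute the FixedTestContainer offsets from the field
// sizes stated in the schema comments. Prints each field's offset and the
// 565-byte total.
package main

import "fmt"

func main() {
	fields := []struct {
		name string
		size int
	}{
		{"field_uint32", 4},
		{"field_uint64", 8},
		{"field_bool", 1},
		{"field_bytes32", 32},
		{"nested", 8 + 32},                    // FixedNestedContainer
		{"vector_field", 24 * 8},              // Vector[uint64, 24]
		{"two_dimension_bytes_field", 5 * 32}, // Vector[Bytes32, 5]
		{"bitvector64_field", 8},              // Bitvector64
		{"bitvector512_field", 64},            // Bitvector512
		{"trailing_field", 56},
	}
	offset := 0
	for _, f := range fields {
		fmt.Printf("%-26s offset %3d size %3d\n", f.name, offset, f.size)
		offset += f.size
	}
	fmt.Println("total:", offset) // 565
}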
// ===== VARIABLE-SIZE TEST CONTAINERS =====
@@ -76,7 +89,14 @@ message VariableTestContainer {
// Variable nested container - test nested container access within variable container
VariableNestedContainer nested = 5;
// Bitlist type - test bitlist serialization
bytes bitlist_field = 6 [
(ethereum.eth.ext.ssz_max) = "2048",
(ethereum.eth.ext.cast_type) =
"github.com/prysmaticlabs/go-bitfield.Bitlist"
];
// Fixed-size trailing field - test fixed field after variable fields
// Verifies correct offset calculation after variable-size fields
bytes trailing_field = 6 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: fixed 56-byte field at end, offset: 32 + 4 + 4 + 4 + 4 = 48
bytes trailing_field = 7 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: fixed 56-byte field at end, offset: 32 + 4 + 4 + 4 + 4 + 4 = 52
}
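For the variable-size container, only the fixed portion contributes to the base size: the 32-byte leading field, one 4-byte offset slot per variable-size field (now five of them, including the new bitlist_field), and the 56-byte trailing field. A minimal sketch of that arithmetic, using the five offset slots implied by the generated code further down:

// Illustrative sketch: the fixed portion of VariableTestContainer after the
// change. Variable payloads are appended after this fixed portion and are
// located via the 4-byte offsets.
package main

import "fmt"

func main() {
	const (
		leading     = 32 // leading_field
		offsetSlots = 5  // one per variable-size field, incl. the new bitlist_field
		trailing    = 56 // trailing_field
	)
	trailingOffset := leading + offsetSlots*4
	base := trailingOffset + trailing
	fmt.Println("trailing_field offset:", trailingOffset) // 52
	fmt.Println("fixed portion:", base)                   // 108
}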

@@ -131,7 +131,21 @@ func (f *FixedTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = append(dst, f.TwoDimensionBytesField[ii]...)
}
// Field (7) 'TrailingField'
// Field (7) 'Bitvector64Field'
if size := len(f.Bitvector64Field); size != 8 {
err = ssz.ErrBytesLengthFn("--.Bitvector64Field", size, 8)
return
}
dst = append(dst, f.Bitvector64Field...)
// Field (8) 'Bitvector512Field'
if size := len(f.Bitvector512Field); size != 64 {
err = ssz.ErrBytesLengthFn("--.Bitvector512Field", size, 64)
return
}
dst = append(dst, f.Bitvector512Field...)
// Field (9) 'TrailingField'
if size := len(f.TrailingField); size != 56 {
err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
return
@@ -145,7 +159,7 @@ func (f *FixedTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (f *FixedTestContainer) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 493 {
if size != 565 {
return ssz.ErrSize
}
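The regenerated UnmarshalSSZ rejects any buffer that is not exactly 565 bytes, whereas the variable-size container further down only enforces a 108-byte lower bound and validates the remainder through its offset table. A minimal sketch of the two checks; the helper names are made up for this example:

// Illustrative sketch: exact-size check for fixed containers vs. lower-bound
// check for variable containers. Helper names are hypothetical.
package main

import (
	"errors"
	"fmt"
)

func checkFixed(buf []byte, want int) error {
	if len(buf) != want {
		return errors.New("ssz: incorrect size")
	}
	return nil
}

func checkVariable(buf []byte, fixedPortion int) error {
	if len(buf) < fixedPortion {
		return errors.New("ssz: incorrect size")
	}
	return nil
}

func main() {
	fmt.Println(checkFixed(make([]byte, 565), 565))    // <nil>
	fmt.Println(checkVariable(make([]byte, 120), 108)) // <nil>
}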
@@ -190,18 +204,30 @@ func (f *FixedTestContainer) UnmarshalSSZ(buf []byte) error {
f.TwoDimensionBytesField[ii] = append(f.TwoDimensionBytesField[ii], buf[277:437][ii*32:(ii+1)*32]...)
}
// Field (7) 'TrailingField'
if cap(f.TrailingField) == 0 {
f.TrailingField = make([]byte, 0, len(buf[437:493]))
// Field (7) 'Bitvector64Field'
if cap(f.Bitvector64Field) == 0 {
f.Bitvector64Field = make([]byte, 0, len(buf[437:445]))
}
f.TrailingField = append(f.TrailingField, buf[437:493]...)
f.Bitvector64Field = append(f.Bitvector64Field, buf[437:445]...)
// Field (8) 'Bitvector512Field'
if cap(f.Bitvector512Field) == 0 {
f.Bitvector512Field = make([]byte, 0, len(buf[445:509]))
}
f.Bitvector512Field = append(f.Bitvector512Field, buf[445:509]...)
// Field (9) 'TrailingField'
if cap(f.TrailingField) == 0 {
f.TrailingField = make([]byte, 0, len(buf[509:565]))
}
f.TrailingField = append(f.TrailingField, buf[509:565]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the FixedTestContainer object
func (f *FixedTestContainer) SizeSSZ() (size int) {
size = 493
size = 565
return
}
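With the two bitvectors inserted before the trailing field, decoding slices the fixed layout at new boundaries: bytes 437–445 for Bitvector64Field, 445–509 for Bitvector512Field, and 509–565 for TrailingField, and SizeSSZ grows from 493 to 565. A standalone sketch of that slicing on a plain buffer, without the container types themselves:

// Illustrative sketch: slicing a full 565-byte FixedTestContainer encoding at
// the boundaries used by the regenerated UnmarshalSSZ above.
package main

import "fmt"

func main() {
	buf := make([]byte, 565)
	bitvector64 := buf[437:445]  // 8 bytes
	bitvector512 := buf[445:509] // 64 bytes
	trailing := buf[509:565]     // 56 bytes
	fmt.Println(len(bitvector64), len(bitvector512), len(trailing)) // 8 64 56
}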
@@ -265,7 +291,21 @@ func (f *FixedTestContainer) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(subIndx)
}
// Field (7) 'TrailingField'
// Field (7) 'Bitvector64Field'
if size := len(f.Bitvector64Field); size != 8 {
err = ssz.ErrBytesLengthFn("--.Bitvector64Field", size, 8)
return
}
hh.PutBytes(f.Bitvector64Field)
// Field (8) 'Bitvector512Field'
if size := len(f.Bitvector512Field); size != 64 {
err = ssz.ErrBytesLengthFn("--.Bitvector512Field", size, 64)
return
}
hh.PutBytes(f.Bitvector512Field)
// Field (9) 'TrailingField'
if size := len(f.TrailingField); size != 56 {
err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
return
@@ -393,7 +433,7 @@ func (v *VariableTestContainer) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the VariableTestContainer object to a target array
func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(104)
offset := int(108)
// Field (0) 'LeadingField'
if size := len(v.LeadingField); size != 32 {
@@ -421,7 +461,11 @@ func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error)
}
offset += v.Nested.SizeSSZ()
// Field (5) 'TrailingField'
// Offset (5) 'BitlistField'
dst = ssz.WriteOffset(dst, offset)
offset += len(v.BitlistField)
// Field (6) 'TrailingField'
if size := len(v.TrailingField); size != 56 {
err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
return
@@ -466,6 +510,13 @@ func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error)
return
}
// Field (5) 'BitlistField'
if size := len(v.BitlistField); size > 2048 {
err = ssz.ErrBytesLengthFn("--.BitlistField", size, 2048)
return
}
dst = append(dst, v.BitlistField...)
return
}
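The new BitlistField follows the usual SSZ pattern for variable-size fields: a 4-byte little-endian offset is written into the fixed portion, and the raw bitlist bytes (including the trailing length bit) are appended after all fixed data. A minimal sketch of writing such an offset slot; the payload bytes are made up:

// Illustrative sketch: a variable field's 4-byte little-endian offset slot,
// pointing just past the 108-byte fixed portion. Payload bytes are examples.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	offsetSlot := make([]byte, 4)
	binary.LittleEndian.PutUint32(offsetSlot, 108) // variable data starts after the fixed portion
	payload := []byte{0xAA, 0x01}                  // example bitlist bytes; the highest set bit marks the length
	fmt.Printf("offset slot: %x, payload: %x\n", offsetSlot, payload)
}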
@@ -473,12 +524,12 @@ func (v *VariableTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 104 {
if size < 108 {
return ssz.ErrSize
}
tail := buf
var o1, o2, o3, o4 uint64
var o1, o2, o3, o4, o5 uint64
// Field (0) 'LeadingField'
if cap(v.LeadingField) == 0 {
@@ -491,7 +542,7 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o1 != 104 {
if o1 != 108 {
return ssz.ErrInvalidVariableOffset
}
@@ -510,11 +561,16 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
// Field (5) 'TrailingField'
if cap(v.TrailingField) == 0 {
v.TrailingField = make([]byte, 0, len(buf[48:104]))
// Offset (5) 'BitlistField'
if o5 = ssz.ReadOffset(buf[48:52]); o5 > size || o4 > o5 {
return ssz.ErrOffset
}
v.TrailingField = append(v.TrailingField, buf[48:104]...)
// Field (6) 'TrailingField'
if cap(v.TrailingField) == 0 {
v.TrailingField = make([]byte, 0, len(buf[52:108]))
}
v.TrailingField = append(v.TrailingField, buf[52:108]...)
// Field (1) 'FieldListUint64'
{
@@ -565,7 +621,7 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
// Field (4) 'Nested'
{
buf = tail[o4:]
buf = tail[o4:o5]
if v.Nested == nil {
v.Nested = new(VariableNestedContainer)
}
@@ -573,12 +629,24 @@ func (v *VariableTestContainer) UnmarshalSSZ(buf []byte) error {
return err
}
}
// Field (5) 'BitlistField'
{
buf = tail[o5:]
if err = ssz.ValidateBitlist(buf, 2048); err != nil {
return err
}
if cap(v.BitlistField) == 0 {
v.BitlistField = make([]byte, 0, len(buf))
}
v.BitlistField = append(v.BitlistField, buf...)
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the VariableTestContainer object
func (v *VariableTestContainer) SizeSSZ() (size int) {
size = 104
size = 108
// Field (1) 'FieldListUint64'
size += len(v.FieldListUint64) * 8
@@ -595,6 +663,9 @@ func (v *VariableTestContainer) SizeSSZ() (size int) {
}
size += v.Nested.SizeSSZ()
// Field (5) 'BitlistField'
size += len(v.BitlistField)
return
}
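Two consequences of the new variable field show up in the hunks above: during decoding, each variable field owns the span between its offset and the next one (so Nested is now bounded by o5 and the bitlist runs from o5 to the end of the buffer, with backwards or out-of-range offsets rejected), and SizeSSZ adds the bitlist's raw byte length on top of the 108-byte base. A hedged sketch of the slicing rule, using a hypothetical sliceVariable helper that is not part of the generated code:

// Illustrative sketch: variable field i owns tail[o_i:o_{i+1}], the last field
// runs to the end of the buffer, and non-monotonic or out-of-range offsets are
// rejected. sliceVariable is a hypothetical helper.
package main

import (
	"errors"
	"fmt"
)

func sliceVariable(tail []byte, offsets []int) ([][]byte, error) {
	out := make([][]byte, len(offsets))
	for i, o := range offsets {
		end := len(tail)
		if i+1 < len(offsets) {
			end = offsets[i+1]
		}
		if o > end || end > len(tail) {
			return nil, errors.New("ssz: invalid offset")
		}
		out[i] = tail[o:end]
	}
	return out, nil
}

func main() {
	tail := make([]byte, 120)
	parts, err := sliceVariable(tail, []int{108, 110, 112, 114, 118})
	fmt.Println(len(parts), err) // 5 <nil>
}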
@@ -670,7 +741,14 @@ func (v *VariableTestContainer) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
// Field (5) 'TrailingField'
// Field (5) 'BitlistField'
if len(v.BitlistField) == 0 {
err = ssz.ErrEmptyBitlist
return
}
hh.PutBitlist(v.BitlistField, 2048)
// Field (6) 'TrailingField'
if size := len(v.TrailingField); size != 56 {
err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
return