Compare commits


1 Commit

Author: Bastin
SHA1: 06aaf4aa36
Message: update lightclient code for gloas
Date: 2026-01-27 14:40:49 +01:00
54 changed files with 137 additions and 1908 deletions

View File

@@ -34,18 +34,6 @@ type Event struct {
Data []byte
}
- // PublishEvent enqueues an event without blocking the producer. If the channel is full,
- // the event is dropped since only the most recent heads are relevant.
- func PublishEvent(eventsChannel chan<- *Event, event *Event) {
- if eventsChannel == nil || event == nil {
- return
- }
- select {
- case eventsChannel <- event:
- default:
- }
- }
// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
@@ -79,20 +67,19 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
if err != nil {
- PublishEvent(eventsChannel, &Event{
+ eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
- })
+ }
return
}
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
- PublishEvent(eventsChannel, &Event{
+ eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
- })
+ }
return
}
@@ -113,6 +100,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
select {
case <-h.ctx.Done():
log.Info("Context canceled, stopping event stream")
+ close(eventsChannel)
return
default:
line := scanner.Text()
@@ -121,7 +109,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
// Process the event when both eventType and data are set
- PublishEvent(eventsChannel, &Event{EventType: eventType, Data: []byte(data)})
+ eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
}
// Reset eventType and data for the next event
@@ -142,9 +130,9 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
}
if err := scanner.Err(); err != nil {
- PublishEvent(eventsChannel, &Event{
+ eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
- })
+ }
}
}
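// A minimal, self-contained sketch (illustrative names, not this package's
// API) of the drop-on-full publish idiom that the removed PublishEvent used,
// contrasted with the direct sends restored above:
package main

import "fmt"

type event struct{ kind string }

// publishNonBlocking enqueues without ever blocking the producer; when the
// buffer is full the event is silently dropped.
func publishNonBlocking(ch chan<- *event, e *event) {
	select {
	case ch <- e:
	default: // buffer full: drop the event
	}
}

func main() {
	ch := make(chan *event, 1)
	publishNonBlocking(ch, &event{kind: "head"}) // fills the one-slot buffer
	publishNonBlocking(ch, &event{kind: "head"}) // buffer full: dropped, no block
	fmt.Println(len(ch))                         // prints 1
}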

View File

@@ -53,7 +53,7 @@ func TestEventStream(t *testing.T) {
defer server.Close()
topics := []string{"head"}
- eventsChannel := make(chan *Event, 4)
+ eventsChannel := make(chan *Event, 1)
stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)
@@ -80,7 +80,7 @@ func TestEventStream(t *testing.T) {
func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
- eventsChannel := make(chan *Event, 4)
+ eventsChannel := make(chan *Event, 1)
ctx := t.Context()
// use valid url that will result in failed request with nil body

View File

@@ -114,32 +114,17 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
}
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
+ out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
- selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
- var i uint64
- for uint64(len(selected)) < fieldparams.PTCSize {
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- for committeeIndex := primitives.CommitteeIndex(0); committeeIndex < primitives.CommitteeIndex(committeesPerSlot); committeeIndex++ {
- if uint64(len(selected)) >= fieldparams.PTCSize {
- break
- }
- committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, committeeIndex)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to get beacon committee %d", committeeIndex)
- }
- selected, i, err = selectByBalanceFill(ctx, st, committee, seed, selected, i)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to sample beacon committee %d", committeeIndex)
- }
+ for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
+ committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
+ }
+ out = append(out, committee...)
}
- return selected, nil
+ return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
}
// ptcSeed computes the seed for the payload timeliness committee.
@@ -163,39 +148,33 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
- func selectByBalanceFill(
- ctx context.Context,
- st state.ReadOnlyBeaconState,
- candidates []primitives.ValidatorIndex,
- seed [32]byte,
- selected []primitives.ValidatorIndex,
- i uint64,
- ) ([]primitives.ValidatorIndex, uint64, error) {
+ func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
+ if len(candidates) == 0 {
+ return nil, errors.New("no candidates for balance weighted selection")
+ }
hashFunc := hash.CustomSHA256Hasher()
// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
var buf [40]byte
copy(buf[:], seed[:])
maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
- for _, idx := range candidates {
+ selected := make([]primitives.ValidatorIndex, 0, count)
+ total := uint64(len(candidates))
+ for i := uint64(0); uint64(len(selected)) < count; i++ {
if ctx.Err() != nil {
- return nil, i, ctx.Err()
+ return nil, ctx.Err()
}
+ idx := candidates[i%total]
ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
if err != nil {
- return nil, i, err
+ return nil, err
}
if ok {
selected = append(selected, idx)
}
- if uint64(len(selected)) == fieldparams.PTCSize {
- break
- }
- i++
}
- return selected, i, nil
+ return selected, nil
}
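// A simplified sketch (not Prysm's code) of the spec loop quoted above:
// cycle through the candidates round-robin and probabilistically accept
// until count validators are selected. The accept predicate stands in for
// acceptByBalance, and the same validator may be accepted more than once.
func selectWeightedSketch(candidates []uint64, count uint64, accept func(idx, round uint64) bool) []uint64 {
	selected := make([]uint64, 0, count)
	total := uint64(len(candidates))
	for i := uint64(0); uint64(len(selected)) < count; i++ {
		idx := candidates[i%total] // wrap around the candidate list
		if accept(idx, i) {
			selected = append(selected, idx)
		}
	}
	return selected
}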
// acceptByBalance determines if a validator is accepted based on its effective balance.

View File

@@ -22,10 +22,6 @@ var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
- // ErrStateDiffIncompatible is returned when state-diff feature is enabled
- // but the database was created without state-diff support.
- var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")

View File

@@ -42,10 +42,6 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}
- if err := s.initializeStateDiff(0, genesisState); err != nil {
- return errors.Wrap(err, "failed to initialize state diff for genesis")
- }
return nil
}

View File

@@ -203,47 +203,17 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
return nil, err
}
- if err := kv.startStateDiff(ctx); err != nil {
- if errors.Is(err, ErrStateDiffIncompatible) {
- return kv, err
- }
- return nil, err
- }
+ if features.Get().EnableStateDiff {
+ sdCache, err := newStateDiffCache(kv)
+ if err != nil {
+ return nil, err
+ }
+ kv.stateDiffCache = sdCache
+ }
return kv, nil
}
- func (kv *Store) startStateDiff(ctx context.Context) error {
- if !features.Get().EnableStateDiff {
- return nil
- }
- // Check if offset already exists (existing state-diff database).
- hasOffset, err := kv.hasStateDiffOffset()
- if err != nil {
- return err
- }
- if hasOffset {
- // Existing state-diff database - restarts not yet supported.
- return errors.New("restarting with existing state-diff database not yet supported")
- }
- // Check if this is a new database (no head block).
- headBlock, err := kv.HeadBlock(ctx)
- if err != nil {
- return errors.Wrap(err, "could not get head block")
- }
- if headBlock == nil {
- // New database - will be initialized later during checkpoint/genesis sync.
- // stateDiffCache stays nil until SaveOrigin or SaveGenesisData initializes it.
- log.Info("State-diff enabled: will be initialized during checkpoint or genesis sync")
- } else {
- // Existing database without state-diff - return store with error for caller to handle.
- return ErrStateDiffIncompatible
- }
- return nil
- }
// ClearDB removes the previously stored database in the data directory.
func (s *Store) ClearDB() error {
if err := s.Close(); err != nil {

View File

@@ -9,13 +9,11 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/math"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
"go.etcd.io/bbolt"
)
@@ -124,66 +122,6 @@ func (s *Store) getOffset() uint64 {
return s.stateDiffCache.getOffset()
}
- // hasStateDiffOffset checks if the state-diff offset has been set in the database.
- // This is used to detect if an existing database has state-diff enabled.
- func (s *Store) hasStateDiffOffset() (bool, error) {
- var hasOffset bool
- err := s.db.View(func(tx *bbolt.Tx) error {
- bucket := tx.Bucket(stateDiffBucket)
- if bucket == nil {
- return nil
- }
- hasOffset = bucket.Get(offsetKey) != nil
- return nil
- })
- return hasOffset, err
- }
- // initializeStateDiff sets up the state-diff schema for a new database.
- // This should be called during checkpoint sync or genesis sync.
- func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.ReadOnlyBeaconState) error {
- // Return early if the feature is not set
- if !features.Get().EnableStateDiff {
- return nil
- }
- // Only reinitialize if the offset is different
- if s.stateDiffCache != nil {
- if s.stateDiffCache.getOffset() == uint64(slot) {
- log.WithField("offset", slot).Warning("Ignoring state diff cache reinitialization")
- return nil
- }
- }
- // Write offset directly to the database (without using cache which doesn't exist yet).
- err := s.db.Update(func(tx *bbolt.Tx) error {
- bucket := tx.Bucket(stateDiffBucket)
- if bucket == nil {
- return bbolt.ErrBucketNotFound
- }
- offsetBytes := make([]byte, 8)
- binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
- return bucket.Put(offsetKey, offsetBytes)
- })
- if err != nil {
- return pkgerrors.Wrap(err, "failed to set offset")
- }
- // Create the state diff cache (this will read the offset from the database).
- sdCache, err := newStateDiffCache(s)
- if err != nil {
- return pkgerrors.Wrap(err, "failed to create state diff cache")
- }
- s.stateDiffCache = sdCache
- // Save the initial state as a full snapshot.
- if err := s.saveFullSnapshot(initialState); err != nil {
- return pkgerrors.Wrap(err, "failed to save initial snapshot")
- }
- log.WithField("offset", slot).Info("Initialized state-diff cache")
- return nil
- }
func keyForSnapshot(v int) ([]byte, error) {
switch v {
case version.Fulu:

View File

@@ -110,8 +110,6 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
return errors.Wrap(err, "save finalized checkpoint")
}
- if err := s.initializeStateDiff(state.Slot(), state); err != nil {
- return errors.Wrap(err, "failed to initialize state diff")
- }
return nil
}

View File

@@ -413,7 +413,7 @@ func CreateDefaultLightClientUpdate(attestedBlock interfaces.ReadOnlySignedBeaco
SyncCommitteeSignature: make([]byte, 96),
},
}
- case version.Electra, version.Fulu:
+ case version.Electra, version.Fulu, version.Gloas:
m = &pb.LightClientUpdateElectra{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
@@ -515,7 +515,7 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
func BlockToLightClientHeader(
ctx context.Context,
attestedBlockVersion int, // this is the version that the light client header should be in, based on the attested block.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block. in case of the latter, we might need to upgrade it to the attested block's version.
) (interfaces.LightClientHeader, error) {
if block.Version() > attestedBlockVersion {
@@ -543,7 +543,7 @@ func BlockToLightClientHeader(
Execution: payloadHeader,
ExecutionBranch: payloadProof,
}
- case version.Deneb, version.Electra, version.Fulu:
+ case version.Deneb, version.Electra, version.Fulu, version.Gloas:
payloadHeader, payloadProof, err := makeExecutionAndProofDeneb(ctx, block)
if err != nil {
return nil, errors.Wrap(err, "could not make execution payload header and proof")

View File

@@ -535,12 +535,7 @@ func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store,
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(ctx, dbPath)
- if errors.Is(err, kv.ErrStateDiffIncompatible) {
- log.WithError(err).Warn("Disabling state-diff feature")
- cfg := features.Get()
- cfg.EnableStateDiff = false
- features.Init(cfg)
- } else if err != nil {
+ if err != nil {
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}

View File

@@ -83,7 +83,6 @@ func TestGetSpec(t *testing.T) {
config.ElectraForkEpoch = 107
config.FuluForkVersion = []byte("FuluForkVersion")
config.FuluForkEpoch = 109
- config.GloasForkEpoch = 110
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -135,10 +134,6 @@ func TestGetSpec(t *testing.T) {
config.AttestationDueBPS = primitives.BP(122)
config.AggregateDueBPS = primitives.BP(123)
config.ContributionDueBPS = primitives.BP(124)
- config.AttestationDueBPSGloas = primitives.BP(126)
- config.AggregateDueBPSGloas = primitives.BP(127)
- config.SyncMessageDueBPSGloas = primitives.BP(128)
- config.ContributionDueBPSGloas = primitives.BP(129)
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = "73"
@@ -220,7 +215,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
- assert.Equal(t, 192, len(data))
+ assert.Equal(t, 187, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -300,8 +295,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x"+hex.EncodeToString([]byte("FuluForkVersion")), v)
case "FULU_FORK_EPOCH":
assert.Equal(t, "109", v)
case "GLOAS_FORK_EPOCH":
assert.Equal(t, "110", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
@@ -486,14 +479,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "123", v)
case "CONTRIBUTION_DUE_BPS":
assert.Equal(t, "124", v)
case "ATTESTATION_DUE_BPS_GLOAS":
assert.Equal(t, "126", v)
case "AGGREGATE_DUE_BPS_GLOAS":
assert.Equal(t, "127", v)
case "SYNC_MESSAGE_DUE_BPS_GLOAS":
assert.Equal(t, "128", v)
case "CONTRIBUTION_DUE_BPS_GLOAS":
assert.Equal(t, "129", v)
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
assert.Equal(t, "8", v)
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":

View File

@@ -48,7 +48,6 @@ go_test(
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//reflection:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",

View File

@@ -35,19 +35,18 @@ import (
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
// version information, and services the node implements and runs.
type Server struct {
LogsStreamer logs.Streamer
StreamLogsBufferSize int
SyncChecker sync.Checker
Server *grpc.Server
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
GenesisTimeFetcher blockchain.TimeFetcher
GenesisFetcher blockchain.GenesisFetcher
POWChainInfoFetcher execution.ChainInfoFetcher
BeaconMonitoringHost string
BeaconMonitoringPort int
- OptimisticModeFetcher blockchain.OptimisticModeFetcher
}
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
@@ -62,28 +61,21 @@ func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (
ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
defer cancel() // Important to avoid a context leak
- // Check optimistic status - validators should not participate when optimistic
- isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
- if err != nil {
- return &empty.Empty{}, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
- }
- if ns.SyncChecker.Synced() && !isOptimistic {
+ if ns.SyncChecker.Synced() {
return &empty.Empty{}, nil
}
if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
- // Set header for REST API clients (via gRPC-gateway)
- if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
- return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
- }
+ if request.SyncingStatus != 0 {
+ // override the 200 success with the provided request status
+ if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
+ return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
+ }
+ return &empty.Empty{}, nil
+ }
return &empty.Empty{}, status.Error(codes.Unavailable, "node is syncing")
}
- if isOptimistic {
- // Set header for REST API clients (via gRPC-gateway)
- if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
- return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
- }
- return &empty.Empty{}, status.Error(codes.Unavailable, "node is optimistic")
- }
return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
}
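// Hypothetical caller-side illustration (nodeClient and the 206 value are
// assumptions, not part of this diff): a syncing node fails the health check
// by default, but a client may supply a custom success code, which the
// handler exposes to REST clients via the x-http-code header in gRPC-gateway.
//
//	_, err := nodeClient.GetHealth(ctx, &ethpb.HealthRequest{SyncingStatus: 206})
//	// err is nil while syncing; the gateway maps x-http-code to HTTP 206.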

View File

@@ -2,7 +2,6 @@ package node
import (
"errors"
"maps"
"testing"
"time"
@@ -22,7 +21,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/reflection"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
@@ -189,71 +187,32 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
assert.Equal(t, errStr, res.CurrentConnectionError)
}
- // mockServerTransportStream implements grpc.ServerTransportStream for testing
- type mockServerTransportStream struct {
- headers map[string][]string
- }
- func (m *mockServerTransportStream) Method() string { return "" }
- func (m *mockServerTransportStream) SetHeader(md metadata.MD) error {
- maps.Copy(m.headers, md)
- return nil
- }
- func (m *mockServerTransportStream) SendHeader(metadata.MD) error { return nil }
- func (m *mockServerTransportStream) SetTrailer(metadata.MD) error { return nil }
func TestNodeServer_GetHealth(t *testing.T) {
tests := []struct {
name string
input *mockSync.Sync
- isOptimistic bool
+ customStatus uint64
wantedErr string
}{
{
- name: "happy path - synced and not optimistic",
- isOptimistic: false,
+ name: "happy path",
input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
},
{
- name: "returns error when not synced and not syncing",
- input: &mockSync.Sync{IsSyncing: false, IsSynced: false},
- isOptimistic: false,
- wantedErr: "service unavailable",
- },
- {
- name: "returns error when syncing",
- input: &mockSync.Sync{IsSyncing: true, IsSynced: false},
- isOptimistic: false,
- wantedErr: "node is syncing",
- },
- {
- name: "returns error when synced but optimistic",
- input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
- isOptimistic: true,
- wantedErr: "node is optimistic",
- },
- {
- name: "returns error when syncing and optimistic",
- input: &mockSync.Sync{IsSyncing: true, IsSynced: false},
- isOptimistic: true,
- wantedErr: "node is syncing",
+ name: "syncing",
+ input: &mockSync.Sync{IsSyncing: false},
+ wantedErr: "service unavailable",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
server := grpc.NewServer()
ns := &Server{
SyncChecker: tt.input,
- OptimisticModeFetcher: &mock.ChainService{Optimistic: tt.isOptimistic},
}
ethpb.RegisterNodeServer(server, ns)
reflection.Register(server)
- // Create context with mock transport stream so grpc.SetHeader works
- stream := &mockServerTransportStream{headers: make(map[string][]string)}
- ctx := grpc.NewContextWithServerTransportStream(t.Context(), stream)
- _, err := ns.GetHealth(ctx, &ethpb.HealthRequest{})
+ _, err := ns.GetHealth(t.Context(), &ethpb.HealthRequest{SyncingStatus: tt.customStatus})
if tt.wantedErr == "" {
require.NoError(t, err)
return

View File

@@ -259,19 +259,18 @@ func NewService(ctx context.Context, cfg *Config) *Service {
}
s.validatorServer = validatorServer
nodeServer := &nodev1alpha1.Server{
LogsStreamer: logs.NewStreamServer(),
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
GenesisFetcher: s.cfg.GenesisFetcher,
POWChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
- OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,

View File

@@ -137,7 +137,7 @@ func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
if ctx.Err() != nil {
return ctx.Err()
}
- offset, lvl, err := s.beaconDB.SlotInDiffTree(slot)
+ _, lvl, err := s.beaconDB.SlotInDiffTree(slot)
if err != nil {
log.WithError(err).Errorf("could not determine if slot %d is in diff tree", slot)
continue
@@ -145,9 +145,6 @@ func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
if lvl == -1 {
continue
}
- if uint64(slot) == offset {
- continue
- }
// The state needs to be saved.
// Try the epoch boundary cache first.
cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)

View File

@@ -1,3 +0,0 @@
### Ignored
- Added a field `path` for the ephemeral log file initialization log.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch).

View File

@@ -1,6 +0,0 @@
### Added
- Added new proofCollector type to ssz-query
### Ignored
- Added tests covering Merkle proof production from a Phase0 beacon state, benchmarked against a real Hoodi beacon state (Fulu version)

View File

@@ -1,3 +0,0 @@
### Changed
- The gRPC health endpoint now returns an error when the node is syncing or optimistic, indicating that it is unavailable.

View File

@@ -1,3 +0,0 @@
### Added
- Initialize db with state-diff feature flag.

View File

@@ -1,2 +0,0 @@
### Added
- Gloas-specific timing intervals for validator attestation, aggregation, and sync duties.

View File

@@ -1,2 +0,0 @@
### Changed
- Sample PTC per committee to reduce allocations.

View File

@@ -160,7 +160,6 @@ var appFlags = []cli.Flag{
dasFlags.BackfillOldestSlot,
dasFlags.BlobRetentionEpochFlag,
flags.BatchVerifierLimit,
- flags.StateDiffExponents,
flags.DisableEphemeralLogFile,
}

View File

@@ -74,7 +74,6 @@ var appHelpFlagGroups = []flagGroup{
flags.RPCHost,
flags.RPCPort,
flags.BatchVerifierLimit,
- flags.StateDiffExponents,
},
},
{

View File

@@ -280,7 +280,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
DisableQUIC,
EnableDiscoveryReboot,
enableExperimentalAttestationPool,
- EnableStateDiff,
forceHeadFlag,
blacklistRoots,
enableHashtree,

View File

@@ -91,10 +91,6 @@ type BeaconChainConfig struct {
AggregateDueBPS primitives.BP `yaml:"AGGREGATE_DUE_BPS" spec:"true"` // AggregateDueBPS defines the aggregate due time in basis points of the slot.
SyncMessageDueBPS primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS" spec:"true"` // SyncMessageDueBPS defines the sync message due time in basis points of the slot.
ContributionDueBPS primitives.BP `yaml:"CONTRIBUTION_DUE_BPS" spec:"true"` // ContributionDueBPS defines the contribution due time in basis points of the slot.
- AttestationDueBPSGloas primitives.BP `yaml:"ATTESTATION_DUE_BPS_GLOAS" spec:"true"` // AttestationDueBPSGloas defines the attestation due time in basis points of the slot (Gloas).
- AggregateDueBPSGloas primitives.BP `yaml:"AGGREGATE_DUE_BPS_GLOAS" spec:"true"` // AggregateDueBPSGloas defines the aggregate due time in basis points of the slot (Gloas).
- SyncMessageDueBPSGloas primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS_GLOAS" spec:"true"` // SyncMessageDueBPSGloas defines the sync message due time in basis points of the slot (Gloas).
- ContributionDueBPSGloas primitives.BP `yaml:"CONTRIBUTION_DUE_BPS_GLOAS" spec:"true"` // ContributionDueBPSGloas defines the contribution due time in basis points of the slot (Gloas).
// Ethereum PoW parameters.
DepositChainID uint64 `yaml:"DEPOSIT_CHAIN_ID" spec:"true"` // DepositChainID of the eth1 network. This used for replay protection.
@@ -192,7 +188,6 @@ type BeaconChainConfig struct {
ElectraForkEpoch primitives.Epoch `yaml:"ELECTRA_FORK_EPOCH" spec:"true"` // ElectraForkEpoch is used to represent the assigned fork epoch for electra.
FuluForkVersion []byte `yaml:"FULU_FORK_VERSION" spec:"true"` // FuluForkVersion is used to represent the fork version for fulu.
FuluForkEpoch primitives.Epoch `yaml:"FULU_FORK_EPOCH" spec:"true"` // FuluForkEpoch is used to represent the assigned fork epoch for fulu.
- GloasForkEpoch primitives.Epoch `yaml:"GLOAS_FORK_EPOCH" spec:"true"` // GloasForkEpoch is used to represent the assigned fork epoch for gloas.
ForkVersionSchedule map[[fieldparams.VersionLength]byte]primitives.Epoch // Schedule of fork epochs by version.
ForkVersionNames map[[fieldparams.VersionLength]byte]string // Human-readable names of fork versions.
@@ -348,7 +343,6 @@ func (b *BeaconChainConfig) VersionToForkEpochMap() map[int]primitives.Epoch {
version.Deneb: b.DenebForkEpoch,
version.Electra: b.ElectraForkEpoch,
version.Fulu: b.FuluForkEpoch,
- version.Gloas: b.GloasForkEpoch,
}
}

View File

@@ -224,7 +224,6 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
fmt.Sprintf("ELECTRA_FORK_VERSION: %#x", cfg.ElectraForkVersion),
fmt.Sprintf("FULU_FORK_EPOCH: %d", cfg.FuluForkEpoch),
fmt.Sprintf("FULU_FORK_VERSION: %#x", cfg.FuluForkVersion),
fmt.Sprintf("GLOAS_FORK_EPOCH: %d", cfg.GloasForkEpoch),
fmt.Sprintf("EPOCHS_PER_SUBNET_SUBSCRIPTION: %d", cfg.EpochsPerSubnetSubscription),
fmt.Sprintf("ATTESTATION_SUBNET_EXTRA_BITS: %d", cfg.AttestationSubnetExtraBits),
fmt.Sprintf("ATTESTATION_SUBNET_PREFIX_BITS: %d", cfg.AttestationSubnetPrefixBits),
@@ -247,10 +246,6 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
fmt.Sprintf("AGGREGATE_DUE_BPS: %d", cfg.AggregateDueBPS),
fmt.Sprintf("SYNC_MESSAGE_DUE_BPS: %d", cfg.SyncMessageDueBPS),
fmt.Sprintf("CONTRIBUTION_DUE_BPS: %d", cfg.ContributionDueBPS),
fmt.Sprintf("ATTESTATION_DUE_BPS_GLOAS: %d", cfg.AttestationDueBPSGloas),
fmt.Sprintf("AGGREGATE_DUE_BPS_GLOAS: %d", cfg.AggregateDueBPSGloas),
fmt.Sprintf("SYNC_MESSAGE_DUE_BPS_GLOAS: %d", cfg.SyncMessageDueBPSGloas),
fmt.Sprintf("CONTRIBUTION_DUE_BPS_GLOAS: %d", cfg.ContributionDueBPSGloas),
}
if len(cfg.BlobSchedule) > 0 {

View File

@@ -24,9 +24,12 @@ import (
// These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
"AGGREGATE_DUE_BPS_GLOAS",
"ATTESTATION_DEADLINE",
"ATTESTATION_DUE_BPS_GLOAS",
"BLOB_SIDECAR_SUBNET_COUNT_FULU",
"CELLS_PER_EXT_BLOB",
"CONTRIBUTION_DUE_BPS_GLOAS",
"EIP6110_FORK_EPOCH",
"EIP6110_FORK_VERSION",
"EIP7002_FORK_EPOCH",
@@ -42,6 +45,7 @@ var placeholderFields = []string{
"EPOCHS_PER_SHUFFLING_PHASE",
"FIELD_ELEMENTS_PER_CELL", // Configured as a constant in config/fieldparams/mainnet.go
"FIELD_ELEMENTS_PER_EXT_BLOB", // Configured in proto/ssz_proto_library.bzl
"GLOAS_FORK_EPOCH",
"GLOAS_FORK_VERSION",
"INCLUSION_LIST_SUBMISSION_DEADLINE",
"INCLUSION_LIST_SUBMISSION_DUE_BPS",
@@ -56,6 +60,7 @@ var placeholderFields = []string{
"PROPOSER_INCLUSION_LIST_CUTOFF",
"PROPOSER_INCLUSION_LIST_CUTOFF_BPS",
"PROPOSER_SELECTION_GAP",
"SYNC_MESSAGE_DUE_BPS_GLOAS",
"TARGET_NUMBER_OF_PEERS",
"UPDATE_TIMEOUT",
"VIEW_FREEZE_CUTOFF_BPS",

View File

@@ -32,8 +32,6 @@ const (
mainnetElectraForkEpoch = 364032 // May 7, 2025, 10:05:11 UTC
// Fulu Fork Epoch for mainnet config
mainnetFuluForkEpoch = 411392 // December 3, 2025, 09:49:11pm UTC
- // Gloas Fork Epoch for mainnet config
- mainnetGloasForkEpoch = math.MaxUint64
)
var mainnetNetworkConfig = &NetworkConfig{
@@ -123,15 +121,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
IntervalsPerSlot: 3,
// Time-based protocol parameters.
ProposerReorgCutoffBPS: primitives.BP(1667),
AttestationDueBPS: primitives.BP(3333),
AggregateDueBPS: primitives.BP(6667),
SyncMessageDueBPS: primitives.BP(3333),
ContributionDueBPS: primitives.BP(6667),
- AttestationDueBPSGloas: primitives.BP(2500),
- AggregateDueBPSGloas: primitives.BP(5000),
- SyncMessageDueBPSGloas: primitives.BP(2500),
- ContributionDueBPSGloas: primitives.BP(5000),
// Ethereum PoW parameters.
DepositChainID: 1, // Chain ID of eth1 mainnet.
@@ -241,7 +235,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
ElectraForkEpoch: mainnetElectraForkEpoch,
FuluForkVersion: []byte{6, 0, 0, 0},
FuluForkEpoch: mainnetFuluForkEpoch,
- GloasForkEpoch: mainnetGloasForkEpoch,
// New values introduced in Altair hard fork 1.
// Participation flag indices.

View File

@@ -35,10 +35,6 @@ func MinimalSpecConfig() *BeaconChainConfig {
// Time parameters
minimalConfig.SecondsPerSlot = 6
minimalConfig.SlotDurationMilliseconds = 6000
- minimalConfig.AttestationDueBPSGloas = 2500
- minimalConfig.AggregateDueBPSGloas = 5000
- minimalConfig.SyncMessageDueBPSGloas = 2500
- minimalConfig.ContributionDueBPSGloas = 5000
minimalConfig.MinAttestationInclusionDelay = 1
minimalConfig.SlotsPerEpoch = 8
minimalConfig.SqrRootSlotsPerEpoch = 2
@@ -102,7 +98,6 @@ func MinimalSpecConfig() *BeaconChainConfig {
minimalConfig.ElectraForkEpoch = math.MaxUint64
minimalConfig.FuluForkVersion = []byte{6, 0, 0, 1}
minimalConfig.FuluForkEpoch = math.MaxUint64
- minimalConfig.GloasForkEpoch = minimalConfig.FarFutureEpoch
minimalConfig.SyncCommitteeSize = 32
minimalConfig.InactivityScoreBias = 4

View File

@@ -163,18 +163,3 @@ func Uint256ToSSZBytes(num string) ([]byte, error) {
}
return PadTo(ReverseByteOrder(uint256.Bytes()), 32), nil
}
- // PutLittleEndian writes an unsigned integer value in little-endian format.
- // Supports sizes 1, 2, 4, or 8 bytes for uint8/16/32/64 respectively.
- func PutLittleEndian(dst []byte, val uint64, size int) {
- switch size {
- case 1:
- dst[0] = byte(val)
- case 2:
- binary.LittleEndian.PutUint16(dst, uint16(val))
- case 4:
- binary.LittleEndian.PutUint32(dst, uint32(val))
- case 8:
- binary.LittleEndian.PutUint64(dst, val)
- }
- }

View File

@@ -9,9 +9,7 @@ go_library(
"container.go",
"generalized_index.go",
"list.go",
"merkle_proof.go",
"path.go",
"proof_collector.go",
"query.go",
"ssz_info.go",
"ssz_object.go",
@@ -22,12 +20,7 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/encoding/ssz/query",
visibility = ["//visibility:public"],
deps = [
"//container/trie:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
@@ -36,24 +29,15 @@ go_test(
name = "go_default_test",
srcs = [
"generalized_index_test.go",
"merkle_proof_test.go",
"path_test.go",
"proof_collector_test.go",
"query_test.go",
"tag_parser_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz:go_default_library",
":go_default_library",
"//encoding/ssz/query/testutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/ssz_query/testing:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -1,34 +0,0 @@
package query
import (
"fmt"
"reflect"
fastssz "github.com/prysmaticlabs/fastssz"
)
// Prove is the entrypoint to generate an SSZ Merkle proof for the given generalized index.
// Parameters:
// - gindex: the generalized index of the node to prove inclusion for.
// Returns:
// - fastssz.Proof: the Merkle proof containing the leaf, index, and sibling hashes.
// - error: any error encountered during proof generation.
func (info *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error) {
if info == nil {
return nil, fmt.Errorf("nil SszInfo")
}
collector := newProofCollector()
collector.addTarget(gindex)
// info.source is guaranteed to be valid and dereferenced by AnalyzeObject
v := reflect.ValueOf(info.source).Elem()
// Start the merkleization and proof collection process.
// In SSZ generalized indices, the root is always at index 1.
if _, err := collector.merkleize(info, v, 1); err != nil {
return nil, err
}
return collector.toProof()
}

View File

@@ -1,163 +0,0 @@
package query_test
import (
"testing"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/ssz/query"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
ssz "github.com/prysmaticlabs/fastssz"
)
func TestProve_FixedTestContainer(t *testing.T) {
obj := createFixedTestContainer()
tests := []string{
".field_uint32",
".nested.value2",
".vector_field[3]",
".bitvector64_field",
".trailing_field",
}
for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, obj, tc)
})
}
}
func TestProve_VariableTestContainer(t *testing.T) {
obj := createVariableTestContainer()
tests := []string{
".leading_field",
".field_list_uint64[2]",
"len(field_list_uint64)",
".nested.nested_list_field[1]",
".variable_container_list[0].inner_1.field_list_uint64[1]",
}
for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, obj, tc)
})
}
}
func TestProve_BeaconBlock(t *testing.T) {
randaoReveal := make([]byte, 96)
for i := range randaoReveal {
randaoReveal[i] = 0x42
}
root32 := make([]byte, 32)
for i := range root32 {
root32[i] = 0x24
}
sig := make([]byte, 96)
for i := range sig {
sig[i] = 0x99
}
att := &eth.Attestation{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: root32,
Source: &eth.Checkpoint{
Epoch: 1,
Root: root32,
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: root32,
},
},
Signature: sig,
}
b := util.NewBeaconBlock()
b.Block.Slot = 123
b.Block.Body.RandaoReveal = randaoReveal
b.Block.Body.Attestations = []*eth.Attestation{att}
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
protoBlock, err := sb.Block().Proto()
require.NoError(t, err)
obj, ok := protoBlock.(query.SSZObject)
require.Equal(t, true, ok, "block proto does not implement query.SSZObject")
tests := []string{
".slot",
".body.randao_reveal",
".body.attestations[0].data.slot",
"len(body.attestations)",
}
for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, obj, tc)
})
}
}
func TestProve_BeaconState(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 16)
require.NoError(t, st.SetSlot(primitives.Slot(42)))
sszObj, ok := st.ToProtoUnsafe().(query.SSZObject)
require.Equal(t, true, ok, "state proto does not implement query.SSZObject")
tests := []string{
".slot",
".latest_block_header",
".validators[0].effective_balance",
"len(validators)",
}
for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, sszObj, tc)
})
}
}
// proveAndVerify is a helper that analyzes an object, generates a Merkle proof for the given path,
// and verifies the proof against the object's root.
func proveAndVerify(t *testing.T, obj query.SSZObject, pathStr string) {
t.Helper()
info, err := query.AnalyzeObject(obj)
require.NoError(t, err)
path, err := query.ParsePath(pathStr)
require.NoError(t, err)
gi, err := query.GetGeneralizedIndexFromPath(info, path)
require.NoError(t, err)
proof, err := info.Prove(gi)
require.NoError(t, err)
require.Equal(t, int(gi), proof.Index)
root, err := obj.HashTreeRoot()
require.NoError(t, err)
ok, err := ssz.VerifyProof(root[:], proof)
require.NoError(t, err)
require.Equal(t, true, ok, "merkle proof verification failed")
require.Equal(t, 32, len(proof.Leaf))
for i, h := range proof.Hashes {
require.Equal(t, 32, len(h), "proof hash %d is not 32 bytes", i)
}
}

View File

@@ -1,672 +0,0 @@
package query
import (
"encoding/binary"
"errors"
"fmt"
"math/bits"
"reflect"
"runtime"
"slices"
"sync"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
"github.com/OffchainLabs/prysm/v7/math"
fastssz "github.com/prysmaticlabs/fastssz"
)
// proofCollector collects sibling hashes and leaves needed for Merkle proofs.
//
// Multiproof-ready design:
// - requiredSiblings/requiredLeaves store which gindices we want to collect (registered before merkleization).
// - siblings/leaves store the actual collected hashes.
//
// Concurrency:
// - required* maps are read-only during merkleization.
// - siblings/leaves writes are protected by mutex.
type proofCollector struct {
sync.Mutex
// Required gindices (registered before merkleization)
requiredSiblings map[uint64]struct{}
requiredLeaves map[uint64]struct{}
// Collected hashes
siblings map[uint64][32]byte
leaves map[uint64][32]byte
}
func newProofCollector() *proofCollector {
return &proofCollector{
requiredSiblings: make(map[uint64]struct{}),
requiredLeaves: make(map[uint64]struct{}),
siblings: make(map[uint64][32]byte),
leaves: make(map[uint64][32]byte),
}
}
func (pc *proofCollector) reset() {
pc.Lock()
defer pc.Unlock()
pc.requiredSiblings = make(map[uint64]struct{})
pc.requiredLeaves = make(map[uint64]struct{})
pc.siblings = make(map[uint64][32]byte)
pc.leaves = make(map[uint64][32]byte)
}
// addTarget registers the target leaf and its required sibling nodes for proof construction.
// Registration should happen before merkleization begins.
func (pc *proofCollector) addTarget(gindex uint64) {
pc.Lock()
defer pc.Unlock()
pc.requiredLeaves[gindex] = struct{}{}
// Walk from the target leaf up to (but not including) the root (gindex=1).
// At each step, register the sibling node required to prove inclusion.
nodeGindex := gindex
for nodeGindex > 1 {
siblingGindex := nodeGindex ^ 1 // flip the last bit: left<->right sibling
pc.requiredSiblings[siblingGindex] = struct{}{}
// Move to parent
nodeGindex /= 2
}
}
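// Worked example (not in the original source): for target gindex 10 the
// path to the root is 10 -> 5 -> 2 -> 1, so addTarget registers siblings
// 11 (10^1), 4 (5^1), and 3 (2^1). XOR with 1 flips left/right within a
// level; integer division by 2 moves to the parent.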
// toProof converts the collected siblings and leaves into a fastssz.Proof structure.
// Current behavior expects a single target leaf (single proof).
func (pc *proofCollector) toProof() (*fastssz.Proof, error) {
pc.Lock()
defer pc.Unlock()
proof := &fastssz.Proof{}
if len(pc.leaves) == 0 {
return nil, errors.New("no leaves collected: add target leaves before merkleization")
}
leafGindices := make([]uint64, 0, len(pc.leaves))
for g := range pc.leaves {
leafGindices = append(leafGindices, g)
}
slices.Sort(leafGindices)
// single proof resides in leafGindices[0]
targetGindex := leafGindices[0]
proofIndex, err := math.Int(targetGindex)
if err != nil {
return nil, fmt.Errorf("gindex %d overflows int: %w", targetGindex, err)
}
proof.Index = proofIndex
// store the leaf
leaf := pc.leaves[targetGindex]
leafBuf := make([]byte, 32)
copy(leafBuf, leaf[:])
proof.Leaf = leafBuf
// Walk from target up to root, collecting siblings.
steps := bits.Len64(targetGindex) - 1
proof.Hashes = make([][]byte, 0, steps)
for targetGindex > 1 {
sib := targetGindex ^ 1
h, ok := pc.siblings[sib]
if !ok {
return nil, fmt.Errorf("missing sibling hash for gindex %d", sib)
}
proof.Hashes = append(proof.Hashes, h[:])
targetGindex /= 2
}
return proof, nil
}
// collectLeaf checks if the given gindex is a required leaf for the proof,
// and if so, stores the provided leaf hash in the collector.
func (pc *proofCollector) collectLeaf(gindex uint64, leaf [32]byte) {
if _, ok := pc.requiredLeaves[gindex]; !ok {
return
}
pc.Lock()
pc.leaves[gindex] = leaf
pc.Unlock()
}
// collectSibling stores the hash for a sibling node identified by gindex.
// It only stores the hash if gindex was pre-registered via addTarget (present in requiredSiblings).
// Writes to the collected siblings map are protected by the collector mutex.
func (pc *proofCollector) collectSibling(gindex uint64, hash [32]byte) {
if _, ok := pc.requiredSiblings[gindex]; !ok {
return
}
pc.Lock()
pc.siblings[gindex] = hash
pc.Unlock()
}
// Merkleizers and proof collection methods
// merkleize recursively traverses an SSZ info and computes the Merkle root of the subtree.
//
// Proof collection:
// - During traversal it calls collectLeaf/collectSibling with the SSZ generalized indices (gindices)
// of visited nodes.
// - The collector only stores hashes for gindices that were pre-registered via addTarget
// (requiredLeaves/requiredSiblings). This makes the traversal multiproof-ready: you can register
// multiple targets before calling merkleize.
//
// SSZ types handled: basic types, containers, lists, vectors, bitlists, and bitvectors.
//
// Parameters:
// - info: SSZ type metadata for the current value.
// - v: reflect.Value of the current value.
// - currentGindex: generalized index of the current subtree root.
//
// Returns:
// - [32]byte: Merkle root of the current subtree.
// - error: any error encountered during traversal/merkleization.
func (pc *proofCollector) merkleize(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
if info.sszType.isBasic() {
return pc.merkleizeBasicType(info.sszType, v, currentGindex)
}
switch info.sszType {
case Container:
return pc.merkleizeContainer(info, v, currentGindex)
case List:
return pc.merkleizeList(info, v, currentGindex)
case Vector:
return pc.merkleizeVector(info, v, currentGindex)
case Bitlist:
return pc.merkleizeBitlist(info, v, currentGindex)
case Bitvector:
return pc.merkleizeBitvector(info, v, currentGindex)
default:
return [32]byte{}, fmt.Errorf("unsupported SSZ type: %v", info.sszType)
}
}
// merkleizeBasicType serializes a basic SSZ value into a 32-byte leaf chunk (little-endian, zero-padded).
//
// Proof collection:
// - It calls collectLeaf(currentGindex, leaf) and stores the leaf if currentGindex was pre-registered via addTarget.
//
// Parameters:
// - t: the SSZType (basic).
// - v: the reflect.Value of the basic value.
// - currentGindex: the generalized index (gindex) of this leaf.
//
// Returns:
// - [32]byte: the 32-byte SSZ leaf chunk.
// - error: if the SSZType is not a supported basic type.
func (pc *proofCollector) merkleizeBasicType(t SSZType, v reflect.Value, currentGindex uint64) ([32]byte, error) {
var leaf [32]byte
// Serialize the value into a 32-byte chunk (little-endian, zero-padded)
switch t {
case Uint8:
leaf[0] = uint8(v.Uint())
case Uint16:
binary.LittleEndian.PutUint16(leaf[:2], uint16(v.Uint()))
case Uint32:
binary.LittleEndian.PutUint32(leaf[:4], uint32(v.Uint()))
case Uint64:
binary.LittleEndian.PutUint64(leaf[:8], v.Uint())
case Boolean:
if v.Bool() {
leaf[0] = 1
}
default:
return [32]byte{}, fmt.Errorf("unexpected basic type: %v", t)
}
pc.collectLeaf(currentGindex, leaf)
return leaf, nil
}
// merkleizeContainer computes the Merkle root of an SSZ container by:
// 1. Merkleizing each field into a 32-byte subtree root
// 2. Merkleizing the field roots into the container root (padding to the next power-of-2)
//
// Generalized indices (gindices): depth = ssz.Depth(uint64(N)) and field i has gindex = (currentGindex << depth) + uint64(i).
// Proof collection: merkleize() computes each field root, merkleizeVectorAndCollect collects required siblings, and collectLeaf stores the container root if registered.
//
// Parameters:
// - info: SSZ type metadata for the container.
// - v: reflect.Value of the container value.
// - currentGindex: generalized index (gindex) of the container root.
//
// Returns:
// - [32]byte: Merkle root of the container.
// - error: any error encountered while merkleizing fields.
func (pc *proofCollector) merkleizeContainer(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
// If the container root itself is the target, compute directly and return early.
// This avoids full subtree merkleization when we only need the root.
if _, ok := pc.requiredLeaves[currentGindex]; ok {
root, err := info.HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
pc.collectLeaf(currentGindex, root)
return root, nil
}
ci, err := info.ContainerInfo()
if err != nil {
return [32]byte{}, err
}
v = dereferencePointer(v)
// Calculate depth: how many levels from container root to field leaves
numFields := len(ci.order)
depth := ssz.Depth(uint64(numFields))
// Step 1: Compute HTR for each subtree (field)
fieldRoots := make([][32]byte, numFields)
for i, name := range ci.order {
fieldInfo := ci.fields[name]
fieldVal := v.FieldByName(fieldInfo.goFieldName)
// Field i's gindex: shift currentGindex left by depth, then OR with field index
fieldGindex := currentGindex<<depth + uint64(i)
htr, err := pc.merkleize(fieldInfo.sszInfo, fieldVal, fieldGindex)
if err != nil {
return [32]byte{}, fmt.Errorf("field %s: %w", name, err)
}
fieldRoots[i] = htr
}
// Step 2: Merkleize the field hashes into the container root,
// collecting sibling hashes if target is within this subtree
root := pc.merkleizeVectorAndCollect(fieldRoots, currentGindex, uint64(depth))
return root, nil
}
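// Worked example (not in the original source): a container with 5 fields
// has depth = ssz.Depth(5) = 3, so with currentGindex = 1 the field roots
// land at gindices 1<<3 + 0..4 = 8..12; leaf slots 13..15 are zero padding
// supplied by merkleizeVectorAndCollect.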
// merkleizeVectorBody computes the Merkle root of the "data" subtree for vector-like SSZ types
// (vectors and the data-part of lists/bitlists).
//
// Generalized indices (gindices): depth = ssz.Depth(limit); leafBase = subtreeRootGindex << depth; element/chunk i gindex = leafBase + uint64(i).
// Proof collection: merkleize() is called for composite elements; merkleizeVectorAndCollect collects required siblings at this layer.
// Padding: merkleizeVectorAndCollect uses trie.ZeroHashes as needed.
//
// Parameters:
// - elemInfo: SSZ type metadata for the element.
// - v: reflect.Value of the vector/list data.
// - length: number of actual elements present.
// - limit: virtual leaf capacity used for padding/Depth (fixed length for vectors, limit for lists).
// - subtreeRootGindex: gindex of the data subtree root.
//
// Returns:
// - [32]byte: Merkle root of the data subtree.
// - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVectorBody(elemInfo *SszInfo, v reflect.Value, length int, limit uint64, subtreeRootGindex uint64) ([32]byte, error) {
depth := uint64(ssz.Depth(limit))
var chunks [][32]byte
if elemInfo.sszType.isBasic() {
// Serialize basic elements and pack into 32-byte chunks using ssz.PackByChunk.
elemSize, err := math.Int(itemLength(elemInfo))
if err != nil {
return [32]byte{}, fmt.Errorf("element size %d overflows int: %w", itemLength(elemInfo), err)
}
serialized := make([][]byte, length)
// Single contiguous allocation for all element data
allData := make([]byte, length*elemSize)
for i := range length {
buf := allData[i*elemSize : (i+1)*elemSize]
elem := v.Index(i)
if elemInfo.sszType == Boolean && elem.Bool() {
buf[0] = 1
} else {
bytesutil.PutLittleEndian(buf, elem.Uint(), elemSize)
}
serialized[i] = buf
}
chunks, err = ssz.PackByChunk(serialized)
if err != nil {
return [32]byte{}, err
}
} else {
// Composite elements: compute each element root (no padding here; merkleizeVectorAndCollect pads).
chunks = make([][32]byte, length)
// Fall back to per-element merkleization with proper gindices for proof collection.
// Parallel execution
workerCount := min(runtime.GOMAXPROCS(0), length)
jobs := make(chan int, workerCount*16)
errCh := make(chan error, 1) // only need the first error
stopCh := make(chan struct{})
var stopOnce sync.Once
var wg sync.WaitGroup
worker := func() {
defer wg.Done()
for idx := range jobs {
select {
case <-stopCh:
return
default:
}
elemGindex := subtreeRootGindex<<depth + uint64(idx)
htr, err := pc.merkleize(elemInfo, v.Index(idx), elemGindex)
if err != nil {
stopOnce.Do(func() { close(stopCh) })
select {
case errCh <- fmt.Errorf("index %d: %w", idx, err):
default:
}
return
}
chunks[idx] = htr
}
}
wg.Add(workerCount)
for range workerCount {
go worker()
}
// Enqueue jobs; stop early if any worker reports an error.
enqueue:
for i := range length {
select {
case <-stopCh:
break enqueue
case jobs <- i:
}
}
close(jobs)
wg.Wait()
select {
case err := <-errCh:
return [32]byte{}, err
default:
}
}
root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, depth)
return root, nil
}
// merkleizeVector computes the Merkle root of an SSZ vector (fixed-length).
//
// Generalized indices (gindices): currentGindex is the gindex of the vector root; element/chunk gindices are derived
// inside merkleizeVectorBody using leafBase = currentGindex << ssz.Depth(leaves).
//
// Proof collection: merkleizeVectorBody performs element/chunk merkleization and collects required siblings at the
// vector layer; collectLeaf stores the vector root if currentGindex was registered via addTarget.
//
// Parameters:
// - info: SSZ type metadata for the vector.
// - v: reflect.Value of the vector value.
// - currentGindex: generalized index (gindex) of the vector root.
//
// Returns:
// - [32]byte: Merkle root of the vector.
// - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
vi, err := info.VectorInfo()
if err != nil {
return [32]byte{}, err
}
length, err := math.Int(vi.Length())
if err != nil {
return [32]byte{}, fmt.Errorf("vector length %d overflows int: %w", vi.Length(), err)
}
elemInfo := vi.element
// Determine the virtual leaf capacity for the vector.
leaves, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}
root, err := pc.merkleizeVectorBody(elemInfo, v, length, leaves, currentGindex)
if err != nil {
return [32]byte{}, err
}
// If the vector root itself is the target
pc.collectLeaf(currentGindex, root)
return root, nil
}
// merkleizeList computes the Merkle root of an SSZ list by merkleizing its data subtree and mixing in the length.
//
// Generalized indices (gindices): dataRoot is the left child of the list root (dataRootGindex = currentGindex*2); the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeVectorBody computes the data root (collecting required siblings in the data subtree), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the list root if registered.
//
// Parameters:
// - info: SSZ type metadata for the list.
// - v: reflect.Value of the list value.
// - currentGindex: generalized index (gindex) of the list root.
//
// Returns:
// - [32]byte: Merkle root of the list.
// - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeList(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
li, err := info.ListInfo()
if err != nil {
return [32]byte{}, err
}
length := v.Len()
elemInfo := li.element
chunks := make([][32]byte, 2)
// Compute the length hash (little-endian uint256)
binary.LittleEndian.PutUint64(chunks[1][:8], uint64(length))
// Data subtree root is the left child of the list root.
dataRootGindex := currentGindex * 2
// Compute virtual leaf capacity for the data subtree.
leaves, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}
chunks[0], err = pc.merkleizeVectorBody(elemInfo, v, length, leaves, dataRootGindex)
if err != nil {
return [32]byte{}, err
}
// Handle the length mixin level (and proof bookkeeping at this level).
// Compute the final list root: hash(dataRoot || lengthHash)
root := pc.mixinLengthAndCollect(currentGindex, chunks)
// If the list root itself is the target
pc.collectLeaf(currentGindex, root)
return root, nil
}
// merkleizeBitvectorBody computes the Merkle root of a bitvector-like byte sequence by packing it into 32-byte chunks
// and merkleizing those chunks as a fixed-capacity vector (padding with trie.ZeroHashes as needed).
//
// Generalized indices (gindices): depth = ssz.Depth(chunkLimit); leafBase = subtreeRootGindex << depth; chunk i uses gindex = leafBase + uint64(i).
// Proof collection: merkleizeVectorAndCollect collects required sibling hashes at the chunk-merkleization layer.
//
// Parameters:
// - data: raw byte sequence representing the bitvector payload.
// - chunkLimit: fixed/limit number of 32-byte chunks (used for padding/Depth).
// - subtreeRootGindex: gindex of the bitvector data subtree root.
//
// Returns:
// - [32]byte: Merkle root of the bitvector data subtree.
// - error: any error encountered while packing data into chunks.
func (pc *proofCollector) merkleizeBitvectorBody(data []byte, chunkLimit uint64, subtreeRootGindex uint64) ([32]byte, error) {
depth := ssz.Depth(chunkLimit)
chunks, err := ssz.PackByChunk([][]byte{data})
if err != nil {
return [32]byte{}, err
}
root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, uint64(depth))
return root, nil
}
// merkleizeBitvector computes the Merkle root of a fixed-length SSZ bitvector and collects proof nodes for targets.
//
// Parameters:
// - info: SSZ type metadata for the bitvector.
// - v: reflect.Value of the bitvector value.
// - currentGindex: generalized index (gindex) of the bitvector root.
//
// Returns:
// - [32]byte: Merkle root of the bitvector.
// - error: any error encountered during packing or merkleization.
func (pc *proofCollector) merkleizeBitvector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
bitvectorBytes := v.Bytes()
if len(bitvectorBytes) == 0 {
return [32]byte{}, fmt.Errorf("bitvector field is uninitialized (nil or empty slice)")
}
// Compute virtual leaf capacity for the bitvector.
numChunks, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}
root, err := pc.merkleizeBitvectorBody(bitvectorBytes, numChunks, currentGindex)
if err != nil {
return [32]byte{}, err
}
pc.collectLeaf(currentGindex, root)
return root, nil
}
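// Worked example (hypothetical figures): a Bitvector64 packs its 64 bits into
// a single 32-byte chunk, so depth is 0 and the chunk itself is the root; a
// Bitvector512 packs into ceil(512/256) = 2 chunks, giving a depth-1 subtree.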
// merkleizeBitlist computes the Merkle root of an SSZ bitlist by merkleizing its data chunks and mixing in the bit length.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeBitvectorBody computes the data root (collecting required siblings under dataRootGindex), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the bitlist root if registered.
//
// Parameters:
// - info: SSZ type metadata for the bitlist.
// - v: reflect.Value of the bitlist value.
// - currentGindex: generalized index (gindex) of the bitlist root.
//
// Returns:
// - [32]byte: Merkle root of the bitlist.
// - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeBitlist(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
bi, err := info.BitlistInfo()
if err != nil {
return [32]byte{}, err
}
bitlistBytes := v.Bytes()
// Use go-bitfield to get bytes with termination bit cleared
bl := bitfield.Bitlist(bitlistBytes)
data := bl.BytesNoTrim()
// Get the bit length from bitlistInfo
bitLength := bi.Length()
// Get the chunk limit from getChunkCount
limitChunks, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}
chunks := make([][32]byte, 2)
// Compute the length hash (little-endian uint256)
binary.LittleEndian.PutUint64(chunks[1][:8], uint64(bitLength))
dataRootGindex := currentGindex * 2
chunks[0], err = pc.merkleizeBitvectorBody(data, limitChunks, dataRootGindex)
if err != nil {
return [32]byte{}, err
}
// Handle the length mixin level (and proof bookkeeping at this level).
root := pc.mixinLengthAndCollect(currentGindex, chunks)
pc.collectLeaf(currentGindex, root)
return root, nil
}
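// Illustrative sketch (hypothetical helper): the chunk limit for a bitlist
// with a given bit limit. Each 32-byte chunk holds 256 bits, so a 2048-bit
// limit packs into ceil(2048/256) = 8 chunks, a data subtree of depth 3.
func bitlistChunkLimit(bitLimit uint64) uint64 {
	return (bitLimit + 255) / 256
}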
// merkleizeVectorAndCollect merkleizes a slice of 32-byte leaf nodes into a subtree root, padding to a virtual size of 2^depth.
//
// Generalized indices (gindices): at layer i (0-based), nodes have gindices levelBase = subtreeGeneralizedIndex << (depth-i) and node gindex = levelBase + idx.
// Proof collection: for each layer it calls collectSibling(nodeGindex, nodeHash) and stores only those gindices registered via addTarget.
//
// Parameters:
// - elements: leaf-level hashes (may be shorter than 2^depth; padding is applied with trie.ZeroHashes).
// - subtreeGeneralizedIndex: gindex of the subtree root.
// - depth: number of merkleization layers from subtree root to leaves.
//
// Returns:
// - [32]byte: Merkle root of the subtree.
func (pc *proofCollector) merkleizeVectorAndCollect(elements [][32]byte, subtreeGeneralizedIndex uint64, depth uint64) [32]byte {
// No elements: the subtree is entirely virtual, so return the precomputed zero hash at this depth.
if len(elements) == 0 {
return trie.ZeroHashes[depth]
}
for i := range depth {
layerLen := len(elements)
oddNodeLength := layerLen%2 == 1
if oddNodeLength {
zerohash := trie.ZeroHashes[i]
elements = append(elements, zerohash)
}
levelBaseGindex := subtreeGeneralizedIndex << (depth - i)
for idx := range elements {
gindex := levelBaseGindex + uint64(idx)
pc.collectSibling(gindex, elements[idx])
pc.collectLeaf(gindex, elements[idx])
}
elements = htr.VectorizedSha256(elements)
}
return elements[0]
}
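// Worked example (hypothetical figures): with depth = 2 and a subtree rooted
// at gindex 1, layer 0 uses levelBase 1<<2 = 4 (leaves at gindices 4..7);
// after one hashing round, layer 1 uses levelBase 1<<1 = 2 (nodes at 2..3);
// the final round yields the subtree root, whose gindex is 1.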
// mixinLengthAndCollect computes the final mix-in root for list/bitlist values:
//
// root = hash(dataRoot, lengthHash)
//
// where chunks[0] is dataRoot and chunks[1] is the 32-byte length hash.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and lengthHash is the right child (lengthHashGindex = currentGindex*2+1).
// Proof collection: it calls collectSibling/collectLeaf for both child gindices; the collector stores them only if they were registered via addTarget.
//
// Parameters:
// - currentGindex: gindex of the parent node (list/bitlist root).
// - chunks: two 32-byte nodes: [dataRoot, lengthHash].
//
// Returns:
//   - [32]byte: mixed-in Merkle root.
func (pc *proofCollector) mixinLengthAndCollect(currentGindex uint64, chunks [][32]byte) [32]byte {
dataRoot, lengthHash := chunks[0], chunks[1]
dataRootGindex, lengthHashGindex := currentGindex*2, currentGindex*2+1
pc.collectSibling(dataRootGindex, dataRoot)
pc.collectSibling(lengthHashGindex, lengthHash)
pc.collectLeaf(dataRootGindex, dataRoot)
pc.collectLeaf(lengthHashGindex, lengthHash)
return ssz.MixInLength(dataRoot, lengthHash[:])
}
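// For reference, the mix-in is plain sha256 over the concatenation of the two
// children, as TestProofCollector_MixinLengthAndCollect asserts:
//
//	root := sha256.Sum256(append(dataRoot[:], lengthHash[:]...))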

View File

@@ -1,531 +0,0 @@
package query
import (
"crypto/sha256"
"encoding/binary"
"reflect"
"slices"
"testing"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
sszquerypb "github.com/OffchainLabs/prysm/v7/proto/ssz_query/testing"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProofCollector_New(t *testing.T) {
pc := newProofCollector()
require.NotNil(t, pc)
require.Equal(t, 0, len(pc.requiredSiblings))
require.Equal(t, 0, len(pc.requiredLeaves))
require.Equal(t, 0, len(pc.siblings))
require.Equal(t, 0, len(pc.leaves))
}
func TestProofCollector_Reset(t *testing.T) {
pc := newProofCollector()
pc.requiredSiblings[3] = struct{}{}
pc.requiredLeaves[5] = struct{}{}
pc.siblings[3] = [32]byte{1}
pc.leaves[5] = [32]byte{2}
pc.reset()
require.Equal(t, 0, len(pc.requiredSiblings))
require.Equal(t, 0, len(pc.requiredLeaves))
require.Equal(t, 0, len(pc.siblings))
require.Equal(t, 0, len(pc.leaves))
}
func TestProofCollector_AddTarget(t *testing.T) {
pc := newProofCollector()
pc.addTarget(5)
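// Proof path for target gindex 5: its sibling is 4; its parent is 2, whose
// sibling is 3; the parent of 2 is the root (gindex 1), which needs no sibling.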
_, hasLeaf := pc.requiredLeaves[5]
_, hasSibling4 := pc.requiredSiblings[4]
_, hasSibling3 := pc.requiredSiblings[3]
_, hasSibling1 := pc.requiredSiblings[1] // GI 1 is the root
require.Equal(t, true, hasLeaf)
require.Equal(t, true, hasSibling4)
require.Equal(t, true, hasSibling3)
require.Equal(t, false, hasSibling1)
}
func TestProofCollector_ToProof(t *testing.T) {
pc := newProofCollector()
pc.addTarget(5)
leaf := [32]byte{9}
sibling4 := [32]byte{4}
sibling3 := [32]byte{3}
pc.collectLeaf(5, leaf)
pc.collectSibling(4, sibling4)
pc.collectSibling(3, sibling3)
proof, err := pc.toProof()
require.NoError(t, err)
require.Equal(t, 5, proof.Index)
require.DeepEqual(t, leaf[:], proof.Leaf)
require.Equal(t, 2, len(proof.Hashes))
require.DeepEqual(t, sibling4[:], proof.Hashes[0])
require.DeepEqual(t, sibling3[:], proof.Hashes[1])
}
func TestProofCollector_ToProof_NoLeaves(t *testing.T) {
pc := newProofCollector()
_, err := pc.toProof()
require.NotNil(t, err)
}
func TestProofCollector_CollectLeaf(t *testing.T) {
pc := newProofCollector()
leaf := [32]byte{7}
pc.collectLeaf(10, leaf)
require.Equal(t, 0, len(pc.leaves))
pc.addTarget(10)
pc.collectLeaf(10, leaf)
stored, ok := pc.leaves[10]
require.Equal(t, true, ok)
require.Equal(t, leaf, stored)
}
func TestProofCollector_CollectSibling(t *testing.T) {
pc := newProofCollector()
hash := [32]byte{5}
pc.collectSibling(4, hash)
require.Equal(t, 0, len(pc.siblings))
pc.addTarget(5)
pc.collectSibling(4, hash)
stored, ok := pc.siblings[4]
require.Equal(t, true, ok)
require.Equal(t, hash, stored)
}
func TestProofCollector_Merkleize_BasicTypes(t *testing.T) {
testCases := []struct {
name string
sszType SSZType
value any
expected [32]byte
}{
{
name: "uint8",
sszType: Uint8,
value: uint8(0x11),
expected: func() [32]byte {
var leaf [32]byte
leaf[0] = 0x11
return leaf
}(),
},
{
name: "uint16",
sszType: Uint16,
value: uint16(0x2211),
expected: func() [32]byte {
var leaf [32]byte
binary.LittleEndian.PutUint16(leaf[:2], 0x2211)
return leaf
}(),
},
{
name: "uint32",
sszType: Uint32,
value: uint32(0x44332211),
expected: func() [32]byte {
var leaf [32]byte
binary.LittleEndian.PutUint32(leaf[:4], 0x44332211)
return leaf
}(),
},
{
name: "uint64",
sszType: Uint64,
value: uint64(0x8877665544332211),
expected: func() [32]byte {
var leaf [32]byte
binary.LittleEndian.PutUint64(leaf[:8], 0x8877665544332211)
return leaf
}(),
},
{
name: "bool",
sszType: Boolean,
value: true,
expected: func() [32]byte {
var leaf [32]byte
leaf[0] = 1
return leaf
}(),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
pc := newProofCollector()
gindex := uint64(3)
pc.addTarget(gindex)
leaf, err := pc.merkleizeBasicType(tc.sszType, reflect.ValueOf(tc.value), gindex)
require.NoError(t, err)
require.Equal(t, tc.expected, leaf)
stored, ok := pc.leaves[gindex]
require.Equal(t, true, ok)
require.Equal(t, tc.expected, stored)
})
}
}
func TestProofCollector_Merkleize_Container(t *testing.T) {
container := makeFixedTestContainer()
info, err := AnalyzeObject(container)
require.NoError(t, err)
pc := newProofCollector()
pc.addTarget(1)
root, err := pc.merkleize(info, reflect.ValueOf(container), 1)
require.NoError(t, err)
expected, err := container.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, expected, root)
stored, ok := pc.leaves[1]
require.Equal(t, true, ok)
require.Equal(t, expected, stored)
}
func TestProofCollector_Merkleize_Vector(t *testing.T) {
container := makeFixedTestContainer()
info, err := AnalyzeObject(container)
require.NoError(t, err)
ci, err := info.ContainerInfo()
require.NoError(t, err)
field := ci.fields["vector_field"]
pc := newProofCollector()
root, err := pc.merkleizeVector(field.sszInfo, reflect.ValueOf(container.VectorField), 1)
require.NoError(t, err)
serialized := make([][]byte, len(container.VectorField))
for i, v := range container.VectorField {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, v)
serialized[i] = buf
}
chunks, err := ssz.PackByChunk(serialized)
require.NoError(t, err)
limit, err := getChunkCount(field.sszInfo)
require.NoError(t, err)
expected := ssz.MerkleizeVector(chunks, limit)
require.Equal(t, expected, root)
}
func TestProofCollector_Merkleize_List(t *testing.T) {
list := []*sszquerypb.FixedNestedContainer{
makeFixedNestedContainer(1),
makeFixedNestedContainer(2),
}
container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
info, err := AnalyzeObject(container)
require.NoError(t, err)
ci, err := info.ContainerInfo()
require.NoError(t, err)
field := ci.fields["field_list_container"]
pc := newProofCollector()
root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
require.NoError(t, err)
listInfo, err := field.sszInfo.ListInfo()
require.NoError(t, err)
expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
require.NoError(t, err)
require.Equal(t, expected, root)
}
func TestProofCollector_Merkleize_Bitvector(t *testing.T) {
container := makeFixedTestContainer()
info, err := AnalyzeObject(container)
require.NoError(t, err)
ci, err := info.ContainerInfo()
require.NoError(t, err)
field := ci.fields["bitvector64_field"]
pc := newProofCollector()
root, err := pc.merkleizeBitvector(field.sszInfo, reflect.ValueOf(container.Bitvector64Field), 1)
require.NoError(t, err)
expected, err := ssz.MerkleizeByteSliceSSZ([]byte(container.Bitvector64Field))
require.NoError(t, err)
require.Equal(t, expected, root)
}
func TestProofCollector_Merkleize_Bitlist(t *testing.T) {
bitlist := bitfield.NewBitlist(16)
bitlist.SetBitAt(3, true)
bitlist.SetBitAt(8, true)
container := makeVariableTestContainer(nil, bitlist)
info, err := AnalyzeObject(container)
require.NoError(t, err)
ci, err := info.ContainerInfo()
require.NoError(t, err)
field := ci.fields["bitlist_field"]
pc := newProofCollector()
root, err := pc.merkleizeBitlist(field.sszInfo, reflect.ValueOf(container.BitlistField), 1)
require.NoError(t, err)
bitlistInfo, err := field.sszInfo.BitlistInfo()
require.NoError(t, err)
expected, err := ssz.BitlistRoot(bitfield.Bitlist(bitlist), bitlistInfo.Limit())
require.NoError(t, err)
require.Equal(t, expected, root)
}
func TestProofCollector_MerkleizeVectorBody_Basic(t *testing.T) {
container := makeFixedTestContainer()
info, err := AnalyzeObject(container)
require.NoError(t, err)
ci, err := info.ContainerInfo()
require.NoError(t, err)
field := ci.fields["vector_field"]
vectorInfo, err := field.sszInfo.VectorInfo()
require.NoError(t, err)
length := len(container.VectorField)
limit, err := getChunkCount(field.sszInfo)
require.NoError(t, err)
pc := newProofCollector()
root, err := pc.merkleizeVectorBody(vectorInfo.element, reflect.ValueOf(container.VectorField), length, limit, 2)
require.NoError(t, err)
serialized := make([][]byte, len(container.VectorField))
for i, v := range container.VectorField {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, v)
serialized[i] = buf
}
chunks, err := ssz.PackByChunk(serialized)
require.NoError(t, err)
expected := ssz.MerkleizeVector(chunks, limit)
require.Equal(t, expected, root)
}
func TestProofCollector_MerkleizeVectorAndCollect(t *testing.T) {
pc := newProofCollector()
pc.addTarget(6)
elements := [][32]byte{{1}, {2}}
expected := ssz.MerkleizeVector(slices.Clone(elements), 2)
root := pc.merkleizeVectorAndCollect(elements, 3, 1)
storedLeaf, hasLeaf := pc.leaves[6]
storedSibling, hasSibling := pc.siblings[7]
require.Equal(t, true, hasLeaf)
require.Equal(t, true, hasSibling)
require.Equal(t, elements[0], storedLeaf)
require.Equal(t, elements[1], storedSibling)
require.Equal(t, expected, root)
}
func TestProofCollector_MixinLengthAndCollect(t *testing.T) {
list := []*sszquerypb.FixedNestedContainer{
makeFixedNestedContainer(1),
makeFixedNestedContainer(2),
}
container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
info, err := AnalyzeObject(container)
require.NoError(t, err)
ci, err := info.ContainerInfo()
require.NoError(t, err)
field := ci.fields["field_list_container"]
// Target gindex 2 (data root) - sibling at gindex 3 (length hash) should be collected
pc := newProofCollector()
pc.addTarget(2)
root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
require.NoError(t, err)
listInfo, err := field.sszInfo.ListInfo()
require.NoError(t, err)
expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
require.NoError(t, err)
require.Equal(t, expected, root)
// Verify data root is collected as leaf at gindex 2
storedLeaf, hasLeaf := pc.leaves[2]
require.Equal(t, true, hasLeaf)
// Verify length hash is collected as sibling at gindex 3
storedSibling, hasSibling := pc.siblings[3]
require.Equal(t, true, hasSibling)
// Verify the root is hash(dataRoot || lengthHash)
expectedBuf := append(storedLeaf[:], storedSibling[:]...)
expectedRoot := sha256.Sum256(expectedBuf)
require.Equal(t, expectedRoot, root)
}
func BenchmarkOptimizedValidatorRoots(b *testing.B) {
validators := make([]*ethpb.Validator, 1000)
for i := range validators {
validators[i] = makeTestValidator(i)
}
b.ResetTimer()
for b.Loop() {
_, err := stateutil.OptimizedValidatorRoots(validators)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkProofCollectorMerkleize(b *testing.B) {
validators := make([]*ethpb.Validator, 1000)
for i := range validators {
validators[i] = makeTestValidator(i)
}
info, err := AnalyzeObject(validators[0])
require.NoError(b, err)
b.ResetTimer()
for b.Loop() {
for _, val := range validators {
pc := newProofCollector()
v := reflect.ValueOf(val)
_, err := pc.merkleize(info, v, 1)
if err != nil {
b.Fatal(err)
}
}
}
}
func makeTestValidator(i int) *ethpb.Validator {
pubkey := make([]byte, 48)
for j := range pubkey {
pubkey[j] = byte(i + j)
}
withdrawalCredentials := make([]byte, 32)
for j := range withdrawalCredentials {
withdrawalCredentials[j] = byte(255 - ((i + j) % 256))
}
return &ethpb.Validator{
PublicKey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
EffectiveBalance: uint64(32000000000 + i),
Slashed: i%2 == 0,
ActivationEligibilityEpoch: primitives.Epoch(i),
ActivationEpoch: primitives.Epoch(i + 1),
ExitEpoch: primitives.Epoch(i + 2),
WithdrawableEpoch: primitives.Epoch(i + 3),
}
}
func makeFixedNestedContainer(value uint64) *sszquerypb.FixedNestedContainer {
value2 := make([]byte, 32)
for i := range value2 {
value2[i] = byte(i)
}
return &sszquerypb.FixedNestedContainer{
Value1: value,
Value2: value2,
}
}
func makeFixedTestContainer() *sszquerypb.FixedTestContainer {
fieldBytes32 := make([]byte, 32)
for i := range fieldBytes32 {
fieldBytes32[i] = byte(i)
}
vectorField := make([]uint64, 24)
for i := range vectorField {
vectorField[i] = uint64(i)
}
rows := make([][]byte, 5)
for i := range rows {
row := make([]byte, 32)
for j := range row {
row[j] = byte(i) + byte(j)
}
rows[i] = row
}
bitvector64 := bitfield.NewBitvector64()
bitvector64.SetBitAt(1, true)
bitvector512 := bitfield.NewBitvector512()
bitvector512.SetBitAt(10, true)
trailing := make([]byte, 56)
for i := range trailing {
trailing[i] = byte(i)
}
return &sszquerypb.FixedTestContainer{
FieldUint32: 1,
FieldUint64: 2,
FieldBool: true,
FieldBytes32: fieldBytes32,
Nested: makeFixedNestedContainer(3),
VectorField: vectorField,
TwoDimensionBytesField: rows,
Bitvector64Field: bitvector64,
Bitvector512Field: bitvector512,
TrailingField: trailing,
}
}
func makeVariableTestContainer(list []*sszquerypb.FixedNestedContainer, bitlist bitfield.Bitlist) *sszquerypb.VariableTestContainer {
leading := make([]byte, 32)
for i := range leading {
leading[i] = byte(i)
}
trailing := make([]byte, 56)
for i := range trailing {
trailing[i] = byte(255 - i)
}
if bitlist == nil {
bitlist = bitfield.NewBitlist(0)
}
return &sszquerypb.VariableTestContainer{
LeadingField: leading,
FieldListContainer: list,
BitlistField: bitlist,
TrailingField: trailing,
}
}

View File

@@ -389,7 +389,6 @@ func TestHashTreeRoot(t *testing.T) {
require.NoError(t, err, "HashTreeRoot should not return an error")
expectedHashTreeRoot, err := tt.obj.HashTreeRoot()
require.NoError(t, err, "HashTreeRoot on original object should not return an error")
// Verify the Merkle tree root matches with the SSZ generated HashTreeRoot
require.Equal(t, expectedHashTreeRoot, hashTreeRoot, "HashTreeRoot from sszInfo should match original object's HashTreeRoot")
})
}

View File

@@ -103,7 +103,7 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
})
logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")
logrus.Debug("Ephemeral log file initialized")
return nil
}

View File

@@ -97,7 +97,6 @@ go_test(
"endtoend_setup_test.go",
"endtoend_test.go",
"minimal_e2e_test.go",
"minimal_hdiff_e2e_test.go",
"minimal_slashing_e2e_test.go",
"slasher_simulator_e2e_test.go",
],

View File

@@ -1,16 +0,0 @@
package endtoend
import (
"testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
)
func TestEndToEnd_MinimalConfig_WithStateDiff(t *testing.T) {
r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()),
types.WithStateDiff(),
)
r.run()
}

View File

@@ -76,15 +76,6 @@ func WithSSZOnly() E2EConfigOpt {
}
}
func WithStateDiff() E2EConfigOpt {
return func(cfg *E2EConfig) {
cfg.BeaconFlags = append(cfg.BeaconFlags,
"--enable-state-diff",
"--state-diff-exponents=6,5", // Small exponents for quick testing
)
}
}
// WithExitEpoch sets a custom epoch for voluntary exit submission.
// This affects ProposeVoluntaryExit, ValidatorsHaveExited, SubmitWithdrawal, and ValidatorsHaveWithdrawn evaluators.
func WithExitEpoch(e primitives.Epoch) E2EConfigOpt {

View File

@@ -1,7 +1,7 @@
diff -urN a/BUILD.bazel b/BUILD.bazel
--- a/BUILD.bazel 1969-12-31 18:00:00.000000000 -0600
+++ b/BUILD.bazel 2025-01-05 12:00:00.000000000 -0600
@@ -0,0 +1,90 @@
@@ -0,0 +1,89 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
@@ -32,7 +32,6 @@ diff -urN a/BUILD.bazel b/BUILD.bazel
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_amd64": [
+ "bindings_darwin_amd64.go",
+ "wrapper_darwin_amd64.s",
+ ],
+ "//conditions:default": [],
+ }),

View File

@@ -66,7 +66,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
// As specified in the spec, an aggregator should wait until two-thirds of the way through the slot
// to broadcast the best aggregate to the global aggregate channel.
// https://github.com/ethereum/consensus-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#broadcast-aggregate
v.waitUntilAggregateDue(ctx, slot)
v.waitToSlotTwoThirds(ctx, slot)
// In a DV setup, selection proofs need to be agreed upon by the DV.
// Checking for selection proofs at slot 0 of the epoch will result in an error, as the call to the DV executes slower than the start of this function.
@@ -203,18 +203,11 @@ func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fiel
return sig.Marshal(), nil
}
// waitUntilAggregateDue waits until the configured aggregation due time within the current slot
// such that any attestations from this slot have time to reach the beacon node before creating
// the aggregated attestation.
//
// Note: Historically this was ~2/3 of the slot, but may differ across forks (e.g. Gloas).
func (v *validator) waitUntilAggregateDue(ctx context.Context, slot primitives.Slot) {
cfg := params.BeaconConfig()
component := cfg.AggregateDueBPS
if slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
component = cfg.AggregateDueBPSGloas
}
v.waitUntilSlotComponent(ctx, slot, component)
// waitToSlotTwoThirds waits until two-thirds of the way through the current slot
// such that any attestations from this slot have time to reach the beacon node
// before creating the aggregated attestation.
func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slot) {
v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregateDueBPS)
}
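// Worked example (hypothetical figures, using the basis-point convention of
// waitUntilSlotComponent): a due component of 8000 BPS on a 12s slot waits
// 12s * 8000 / 10000 = 9.6s into the slot; the historical two-thirds point
// corresponds to ~6667 BPS.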
// This returns the signature of validator signing over aggregate and

View File

@@ -260,7 +260,7 @@ func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) {
timeToSleep := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AggregateDueBPS)
twoThirdTime := currentTime.Add(timeToSleep)
validator.waitUntilAggregateDue(t.Context(), numOfSlots)
validator.waitToSlotTwoThirds(t.Context(), numOfSlots)
currentTime = time.Now()
assert.Equal(t, twoThirdTime.Unix(), currentTime.Unix())
})
@@ -280,7 +280,7 @@ func TestWaitForSlotTwoThird_DoneContext_ReturnsImmediately(t *testing.T) {
expectedTime := time.Now()
ctx, cancel := context.WithCancel(t.Context())
cancel()
validator.waitUntilAggregateDue(ctx, numOfSlots)
validator.waitToSlotTwoThirds(ctx, numOfSlots)
currentTime = time.Now()
assert.Equal(t, expectedTime.Unix(), currentTime.Unix())
})

View File

@@ -37,7 +37,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
defer span.End()
span.SetAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
v.waitUntilAttestationDueOrValidBlock(ctx, slot)
v.waitOneThirdOrValidBlock(ctx, slot)
var b strings.Builder
if err := b.WriteByte(byte(iface.RoleAttester)); err != nil {
@@ -259,12 +259,12 @@ func (v *validator) setHighestSlot(slot primitives.Slot) {
}
}
// waitUntilAttestationDueOrValidBlock waits until (a) or (b), whichever comes first:
// waitOneThirdOrValidBlock waits until (a) or (b), whichever comes first:
//
// (a) the validator has received a valid block that is the same slot as input slot
// (b) the configured attestation due time has transpired (as basis points of the slot duration)
func (v *validator) waitUntilAttestationDueOrValidBlock(ctx context.Context, slot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "validator.waitUntilAttestationDueOrValidBlock")
// (b) one-third of the slot has transpired (SECONDS_PER_SLOT / 3 seconds after the start of the slot)
func (v *validator) waitOneThirdOrValidBlock(ctx context.Context, slot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "validator.waitOneThirdOrValidBlock")
defer span.End()
// Don't need to wait if requested slot is the same as highest valid slot.
@@ -272,12 +272,7 @@ func (v *validator) waitUntilAttestationDueOrValidBlock(ctx context.Context, slo
return
}
cfg := params.BeaconConfig()
component := cfg.AttestationDueBPS
if slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
component = cfg.AttestationDueBPSGloas
}
finalTime, err := v.slotComponentDeadline(slot, component)
finalTime, err := v.slotComponentDeadline(slot, params.BeaconConfig().AttestationDueBPS)
if err != nil {
log.WithError(err).WithField("slot", slot).Error("Slot overflows, unable to wait for attestation deadline")
return

View File

@@ -706,7 +706,7 @@ func TestServer_WaitToSlotOneThird_CanWait(t *testing.T) {
timeToSleep := params.BeaconConfig().SecondsPerSlot / 3
oneThird := currentTime.Add(time.Duration(timeToSleep) * time.Second)
v.waitUntilAttestationDueOrValidBlock(t.Context(), currentSlot)
v.waitOneThirdOrValidBlock(t.Context(), currentSlot)
if oneThird.Sub(time.Now()) > 10*time.Millisecond { // Allow for small diff due to execution time.
t.Errorf("Wanted %s time for slot one third but got %s", oneThird, currentTime)
@@ -724,7 +724,7 @@ func TestServer_WaitToSlotOneThird_SameReqSlot(t *testing.T) {
highestValidSlot: currentSlot,
}
v.waitUntilAttestationDueOrValidBlock(t.Context(), currentSlot)
v.waitOneThirdOrValidBlock(t.Context(), currentSlot)
if currentTime.Sub(time.Now()) > 10*time.Millisecond { // Allow for small diff due to execution time.
t.Errorf("Wanted %s time for slot one third but got %s", time.Now(), currentTime)
@@ -750,7 +750,7 @@ func TestServer_WaitToSlotOneThird_ReceiveBlockSlot(t *testing.T) {
v.slotFeed.Send(currentSlot)
})
v.waitUntilAttestationDueOrValidBlock(t.Context(), currentSlot)
v.waitOneThirdOrValidBlock(t.Context(), currentSlot)
if currentTime.Sub(time.Now()) > 10*time.Millisecond { // Allow for small diff due to execution time.
t.Errorf("Wanted %s time for slot one third but got %s", time.Now(), currentTime)

View File

@@ -285,10 +285,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
ctx, span := trace.StartSpan(ctx, "validator.gRPCClient.StartEventStream")
defer span.End()
if len(topics) == 0 {
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(errors.New("no topics were added").Error()),
})
}
return
}
// TODO(13563): ONLY WORKS WITH HEAD TOPIC.
@@ -299,10 +299,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
}
}
if !containsHead {
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, "gRPC only supports the head topic, and head topic was not passed").Error()),
})
}
}
if containsHead && len(topics) > 1 {
log.Warn("gRPC only supports the head topic, other topics will be ignored")
@@ -310,10 +310,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
stream, err := c.beaconNodeValidatorClient.StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
if err != nil {
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
})
}
return
}
c.isEventStreamRunning = true
@@ -327,25 +327,25 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
if ctx.Err() != nil {
c.isEventStreamRunning = false
if errors.Is(ctx.Err(), context.Canceled) {
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, ctx.Err().Error()).Error()),
})
}
return
}
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(ctx.Err().Error()),
})
}
return
}
res, err := stream.Recv()
if err != nil {
c.isEventStreamRunning = false
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventConnectionError,
Data: []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
})
}
return
}
if res == nil {
@@ -357,15 +357,15 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
CurrentDutyDependentRoot: hexutil.Encode(res.CurrentDutyDependentRoot),
})
if err != nil {
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventError,
Data: []byte(errors.Wrap(err, "failed to marshal Head Event").Error()),
})
}
}
eventClient.PublishEvent(eventsChannel, &eventClient.Event{
eventsChannel <- &eventClient.Event{
EventType: eventClient.EventHead,
Data: b,
})
}
}
}
}

View File

@@ -223,7 +223,7 @@ func TestStartEventStream(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
eventsChannel := make(chan *eventClient.Event, 4) // Buffer to prevent blocking
eventsChannel := make(chan *eventClient.Event, 1) // Buffer to prevent blocking
tc.prepare() // Setup mock expectations
go grpcClient.StartEventStream(ctx, tc.topics, eventsChannel)

View File

@@ -441,6 +441,7 @@ func TestRunnerPushesProposerSettings_ValidContext(t *testing.T) {
defer assertValidContext(t, timedCtx, ctx)
delay(t)
})
vcm.EXPECT().EventStreamIsRunning().Return(true).AnyTimes().Do(func() { delay(t) })
vcm.EXPECT().SubmitValidatorRegistrations(liveCtx, gomock.Any()).Do(func(ctx context.Context, _ any) {
defer assertValidContext(t, timedCtx, ctx) // This is the specific regression test assertion for PR 15369.
delay(t)

View File

@@ -66,8 +66,6 @@ type ValidatorService struct {
closeClientFunc func() // validator client stop function is used here
}
const eventChannelBufferSize = 32
// Config for the validator service.
type Config struct {
Validator iface.Validator
@@ -236,7 +234,7 @@ func (v *ValidatorService) Start() {
distributed: v.distributed,
disableDutiesPolling: v.disableDutiesPolling,
accountsChangedChannel: make(chan [][fieldparams.BLSPubkeyLength]byte, 1),
eventsChannel: make(chan *eventClient.Event, eventChannelBufferSize),
eventsChannel: make(chan *eventClient.Event, 1),
}
hm := newHealthMonitor(v.ctx, v.cancel, v.maxHealthChecks, v.validator)

View File

@@ -29,7 +29,7 @@ func (v *validator) SubmitSyncCommitteeMessage(ctx context.Context, slot primiti
defer span.End()
span.SetAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
v.waitUntilAttestationDueOrValidBlock(ctx, slot)
v.waitOneThirdOrValidBlock(ctx, slot)
res, err := v.validatorClient.SyncMessageBlockRoot(ctx, &emptypb.Empty{})
if err != nil {
@@ -127,12 +127,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot p
return
}
cfg := params.BeaconConfig()
component := cfg.ContributionDueBPS
if slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
component = cfg.ContributionDueBPSGloas
}
v.waitUntilSlotComponent(ctx, slot, component)
v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().ContributionDueBPS)
coveredSubnets := make(map[uint64]bool)
for i, comIdx := range indexRes.Indices {

View File

@@ -64,11 +64,6 @@ var (
msgNoKeysFetched = "No validating keys fetched. Waiting for keys..."
)
const (
eventStreamStopped uint32 = iota
eventStreamRunning
)
type validator struct {
logValidatorPerformance bool
distributed bool
@@ -87,7 +82,6 @@ type validator struct {
cachedAttestationData *ethpb.AttestationData
accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
eventsChannel chan *eventClient.Event
eventStreamState atomic.Uint32
highestValidSlot primitives.Slot
submittedAggregates map[submittedAttKey]*submittedAtt
graffitiStruct *graffiti.Graffiti
@@ -1217,40 +1211,12 @@ func (v *validator) PushProposerSettings(ctx context.Context, slot primitives.Sl
}
func (v *validator) StartEventStream(ctx context.Context, topics []string) {
if !v.eventStreamState.CompareAndSwap(eventStreamStopped, eventStreamRunning) {
if v.EventStreamIsRunning() {
log.Debug("EventStream is already running")
return
}
log.WithField("topics", topics).Info("Starting event stream")
go v.runEventStream(ctx, topics)
}
func (v *validator) runEventStream(ctx context.Context, topics []string) {
defer v.eventStreamState.Store(eventStreamStopped)
backoff := time.Second
const maxBackoff = 30 * time.Second
for {
v.validatorClient.StartEventStream(ctx, topics, v.eventsChannel)
if ctx.Err() != nil {
return
}
log.WithField("retryIn", backoff).Warn("Event stream ended unexpectedly, attempting to resubscribe")
timer := time.NewTimer(backoff)
select {
case <-ctx.Done():
timer.Stop()
return
case <-timer.C:
}
if backoff < maxBackoff {
backoff *= 2
if backoff > maxBackoff {
backoff = maxBackoff
}
}
}
v.validatorClient.StartEventStream(ctx, topics, v.eventsChannel)
}
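// Retry cadence implied by the backoff loop above: waits of 1s, 2s, 4s, 8s,
// 16s, then 30s (capped) between resubscription attempts; the loop exits only
// once the context is done.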
func (v *validator) checkDependentRoots(ctx context.Context, head *structs.HeadEvent) error {
@@ -1337,7 +1303,7 @@ func (v *validator) ProcessEvent(ctx context.Context, event *eventClient.Event)
}
func (v *validator) EventStreamIsRunning() bool {
return v.eventStreamState.Load() == eventStreamRunning
return v.validatorClient.EventStreamIsRunning()
}
func (v *validator) Host() string {

View File

@@ -51,20 +51,12 @@ func (v *validator) slotComponentSpanName(component primitives.BP) string {
switch component {
case cfg.AttestationDueBPS:
return "validator.waitAttestationWindow"
case cfg.AttestationDueBPSGloas:
return "validator.waitAttestationWindow"
case cfg.AggregateDueBPS:
return "validator.waitAggregateWindow"
case cfg.AggregateDueBPSGloas:
return "validator.waitAggregateWindow"
case cfg.SyncMessageDueBPS:
return "validator.waitSyncMessageWindow"
case cfg.SyncMessageDueBPSGloas:
return "validator.waitSyncMessageWindow"
case cfg.ContributionDueBPS:
return "validator.waitContributionWindow"
case cfg.ContributionDueBPSGloas:
return "validator.waitContributionWindow"
case cfg.ProposerReorgCutoffBPS:
return "validator.waitProposerReorgWindow"
default: