Compare commits

2 Commits

Author SHA1 Message Date
Victor Farazdagi
1c65c8866a fix: bazel build failure on macOS (hashtree patch) (#16281)
**What type of PR is this?**


Bug fix

**What does this PR do? Why is it needed?**
  
It appears that #16216 introduced hashtree integration but broke builds
on macOS Intel (darwin_amd64).

```
Error:
Undefined symbols for architecture x86_64:
  "_github.com/OffchainLabs/hashtree.HashtreeHash"
```

The Bazel patch for hashtree was missing `wrapper_darwin_amd64.s`. `//go:noescape`
in `bindings.go` declares the function without a body, so the symbol must be
defined elsewhere; other platforms link the optimized assembly, but darwin_amd64
uses a stub, and that stub still has to be present at link time -- hence this PR.
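
For context on why this works: `//go:noescape` annotates a Go function declaration that has no body, so the toolchain expects the symbol to be defined in an assembly file compiled for the target platform. A minimal sketch of the mechanism (hypothetical names, not the hashtree sources):

```go
// bindings.go (hypothetical demo): the declaration has no Go body, so the
// linker must find ·addOne in an assembly file compiled for this platform.
package demo

//go:noescape
func addOne(x uint64) uint64
```

```asm
// add_darwin_amd64.s (hypothetical demo): without this file in the build's
// srcs, linking fails with "Undefined symbols for architecture x86_64".
#include "textflag.h"

TEXT ·addOne(SB), NOSPLIT, $0-16
	MOVQ x+0(FP), AX
	INCQ AX
	MOVQ AX, ret+8(FP)
	RET
```

`go build` picks up `.s` files automatically by filename suffix, but the Bazel patch lists `srcs` explicitly per platform, which is why the omission only surfaced in the Bazel build.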

**Other notes for review**

I've re-checked using `bazel clean && bazel build //cmd/beacon-chain` -- it was
failing before and works now.

cc @potuz as the original patch author

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2026-01-27 16:40:29 +00:00
terence
14a4b97d57 gloas: add duty timing BPS and fork epoch gating (#16291)
This PR adds Gloas-specific timing basis points for validator duties
(attestation, aggregation, and sync contribution) and wires the validator
client to use them once `GloasForkEpoch` is active. For now, `GloasForkEpoch`
is set to far-future, so behavior is unchanged until the fork epoch is
configured.

Note to reviewers: today the sync message duty uses `waitOneThirdOrValidBlock`,
which shares the attestation deadline; that is why the sync message BPS is not
used.
2026-01-27 15:29:37 +00:00
17 changed files with 107 additions and 63 deletions
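
The wiring repeated across the validator diffs below reduces to one comparison: use the legacy basis-points value until the slot's epoch reaches `GloasForkEpoch`, then switch to the Gloas value. A self-contained sketch of the pattern, with toy types standing in for `primitives.Epoch` and `primitives.BP` (names here are illustrative, not Prysm's):

```go
package main

import "fmt"

type (
	Epoch uint64 // stands in for primitives.Epoch
	BP    uint64 // stands in for primitives.BP (basis points of the slot)
)

const slotsPerEpoch = 32

// dueBPS mirrors the fork gating added in attest.go, aggregate.go, and
// sync_committee.go: pre-Gloas value before the fork epoch, Gloas value after.
func dueBPS(slot uint64, gloasForkEpoch Epoch, pre, post BP) BP {
	if Epoch(slot/slotsPerEpoch) >= gloasForkEpoch {
		return post
	}
	return pre
}

func main() {
	farFuture := Epoch(^uint64(0)) // GloasForkEpoch default: behavior unchanged
	fmt.Println(dueBPS(100, farFuture, 3333, 2500)) // 3333
	fmt.Println(dueBPS(100, 3, 3333, 2500))         // 2500 (slot 100 is in epoch 3)
}
```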

View File

@@ -114,24 +114,17 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
 	}
 	committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
-	selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
+	out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
 	for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
-		if uint64(len(selected)) >= fieldparams.PTCSize {
-			break
-		}
 		committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
 		}
-		if err := selectByBalanceFill(ctx, st, committee, seed, &selected); err != nil {
-			return nil, errors.Wrapf(err, "failed to sample beacon committee %d", i)
-		}
+		out = append(out, committee...)
 	}
-	return selected, nil
+	return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
 }
 // ptcSeed computes the seed for the payload timeliness committee.
@@ -155,16 +148,9 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
 //	    if compute_balance_weighted_acceptance(state, indices[next], seed, i):
 //	        selected.append(indices[next])
 //	        i += 1
-func selectByBalanceFill(
-	ctx context.Context,
-	st state.ReadOnlyBeaconState,
-	candidates []primitives.ValidatorIndex,
-	seed [32]byte,
-	selected *[]primitives.ValidatorIndex,
-) error {
-	need := fieldparams.PTCSize - uint64(len(*selected))
-	if len(candidates) == 0 || need == 0 {
-		return nil
+func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
+	if len(candidates) == 0 {
+		return nil, errors.New("no candidates for balance weighted selection")
 	}
 	hashFunc := hash.CustomSHA256Hasher()
@@ -173,26 +159,22 @@ func selectByBalanceFill(
 	copy(buf[:], seed[:])
 	maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
-	out := *selected
+	selected := make([]primitives.ValidatorIndex, 0, count)
 	total := uint64(len(candidates))
-	var i uint64
-	for need > 0 {
+	for i := uint64(0); uint64(len(selected)) < count; i++ {
 		if ctx.Err() != nil {
-			return ctx.Err()
+			return nil, ctx.Err()
 		}
 		idx := candidates[i%total]
 		ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		if ok {
-			out = append(out, idx)
-			need--
+			selected = append(selected, idx)
 		}
-		i++
 	}
-	*selected = out
-	return nil
+	return selected, nil
 }
 // acceptByBalance determines if a validator is accepted based on its effective balance.
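
The loop above cycles through the candidate list round-robin, accepting validators one at a time until `count` are selected; the real `acceptByBalance` weights acceptance by effective balance using the seeded hash, and the `ctx.Err()` check guards against spinning forever. A runnable sketch of the same control flow with a stubbed acceptance predicate (names here are illustrative, not Prysm's):

```go
package main

import "fmt"

// selectByBalanceSketch mirrors the loop above: iterate candidates round-robin,
// accept one at a time, stop once count indices have been selected.
func selectByBalanceSketch(candidates []uint64, count uint64, accept func(idx, round uint64) bool) ([]uint64, error) {
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no candidates for balance weighted selection")
	}
	selected := make([]uint64, 0, count)
	total := uint64(len(candidates))
	for i := uint64(0); uint64(len(selected)) < count; i++ {
		idx := candidates[i%total]
		if accept(idx, i) {
			selected = append(selected, idx)
		}
	}
	return selected, nil
}

func main() {
	// Stub predicate: accept on even rounds. The real acceptByBalance weights
	// acceptance by the validator's effective balance via a seeded hash.
	out, _ := selectByBalanceSketch([]uint64{10, 11, 12}, 4, func(_, round uint64) bool {
		return round%2 == 0
	})
	fmt.Println(out) // [10 12 11 10]
}
```

Compared with the removed `selectByBalanceFill`, which mutated a `*selected` accumulator per committee, the new function takes all candidates in one pass and returns the slice, matching the spec pseudocode quoted above.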

View File

@@ -83,6 +83,7 @@ func TestGetSpec(t *testing.T) {
 	config.ElectraForkEpoch = 107
 	config.FuluForkVersion = []byte("FuluForkVersion")
 	config.FuluForkEpoch = 109
+	config.GloasForkEpoch = 110
 	config.BLSWithdrawalPrefixByte = byte('b')
 	config.ETH1AddressWithdrawalPrefixByte = byte('c')
 	config.GenesisDelay = 24
@@ -134,6 +135,10 @@ func TestGetSpec(t *testing.T) {
 	config.AttestationDueBPS = primitives.BP(122)
 	config.AggregateDueBPS = primitives.BP(123)
 	config.ContributionDueBPS = primitives.BP(124)
+	config.AttestationDueBPSGloas = primitives.BP(126)
+	config.AggregateDueBPSGloas = primitives.BP(127)
+	config.SyncMessageDueBPSGloas = primitives.BP(128)
+	config.ContributionDueBPSGloas = primitives.BP(129)
 	config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
 	config.TerminalBlockHashActivationEpoch = 72
 	config.TerminalTotalDifficulty = "73"
@@ -215,7 +220,7 @@ func TestGetSpec(t *testing.T) {
 	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
 	data, ok := resp.Data.(map[string]any)
 	require.Equal(t, true, ok)
-	assert.Equal(t, 187, len(data))
+	assert.Equal(t, 192, len(data))
 	for k, v := range data {
 		t.Run(k, func(t *testing.T) {
 			switch k {
@@ -295,6 +300,8 @@ func TestGetSpec(t *testing.T) {
 				assert.Equal(t, "0x"+hex.EncodeToString([]byte("FuluForkVersion")), v)
 			case "FULU_FORK_EPOCH":
 				assert.Equal(t, "109", v)
+			case "GLOAS_FORK_EPOCH":
+				assert.Equal(t, "110", v)
 			case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
 				assert.Equal(t, "1000", v)
 			case "BLS_WITHDRAWAL_PREFIX":
@@ -479,6 +486,14 @@ func TestGetSpec(t *testing.T) {
 				assert.Equal(t, "123", v)
 			case "CONTRIBUTION_DUE_BPS":
 				assert.Equal(t, "124", v)
+			case "ATTESTATION_DUE_BPS_GLOAS":
+				assert.Equal(t, "126", v)
+			case "AGGREGATE_DUE_BPS_GLOAS":
+				assert.Equal(t, "127", v)
+			case "SYNC_MESSAGE_DUE_BPS_GLOAS":
+				assert.Equal(t, "128", v)
+			case "CONTRIBUTION_DUE_BPS_GLOAS":
+				assert.Equal(t, "129", v)
 			case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
 				assert.Equal(t, "8", v)
 			case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":

View File

@@ -0,0 +1,3 @@
+### Fixed
+- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch).

View File

@@ -0,0 +1,2 @@
+### Added
+- Gloas-specific timing intervals for validator attestation, aggregation, and sync duties.

View File

@@ -1,2 +0,0 @@
-### Changed
-- Sample PTC per committee to reduce allocations.

View File

@@ -91,6 +91,10 @@ type BeaconChainConfig struct {
 	AggregateDueBPS    primitives.BP `yaml:"AGGREGATE_DUE_BPS" spec:"true"`    // AggregateDueBPS defines the aggregate due time in basis points of the slot.
 	SyncMessageDueBPS  primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS" spec:"true"` // SyncMessageDueBPS defines the sync message due time in basis points of the slot.
 	ContributionDueBPS primitives.BP `yaml:"CONTRIBUTION_DUE_BPS" spec:"true"` // ContributionDueBPS defines the contribution due time in basis points of the slot.
+	AttestationDueBPSGloas  primitives.BP `yaml:"ATTESTATION_DUE_BPS_GLOAS" spec:"true"`  // AttestationDueBPSGloas defines the attestation due time in basis points of the slot (Gloas).
+	AggregateDueBPSGloas    primitives.BP `yaml:"AGGREGATE_DUE_BPS_GLOAS" spec:"true"`    // AggregateDueBPSGloas defines the aggregate due time in basis points of the slot (Gloas).
+	SyncMessageDueBPSGloas  primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS_GLOAS" spec:"true"` // SyncMessageDueBPSGloas defines the sync message due time in basis points of the slot (Gloas).
+	ContributionDueBPSGloas primitives.BP `yaml:"CONTRIBUTION_DUE_BPS_GLOAS" spec:"true"` // ContributionDueBPSGloas defines the contribution due time in basis points of the slot (Gloas).
 	// Ethereum PoW parameters.
 	DepositChainID uint64 `yaml:"DEPOSIT_CHAIN_ID" spec:"true"` // DepositChainID of the eth1 network. This used for replay protection.
@@ -188,6 +192,7 @@ type BeaconChainConfig struct {
 	ElectraForkEpoch primitives.Epoch `yaml:"ELECTRA_FORK_EPOCH" spec:"true"` // ElectraForkEpoch is used to represent the assigned fork epoch for electra.
 	FuluForkVersion  []byte           `yaml:"FULU_FORK_VERSION" spec:"true"`  // FuluForkVersion is used to represent the fork version for fulu.
 	FuluForkEpoch    primitives.Epoch `yaml:"FULU_FORK_EPOCH" spec:"true"`    // FuluForkEpoch is used to represent the assigned fork epoch for fulu.
+	GloasForkEpoch   primitives.Epoch `yaml:"GLOAS_FORK_EPOCH" spec:"true"`   // GloasForkEpoch is used to represent the assigned fork epoch for gloas.
 	ForkVersionSchedule map[[fieldparams.VersionLength]byte]primitives.Epoch // Schedule of fork epochs by version.
 	ForkVersionNames    map[[fieldparams.VersionLength]byte]string           // Human-readable names of fork versions.
@@ -343,6 +348,7 @@ func (b *BeaconChainConfig) VersionToForkEpochMap() map[int]primitives.Epoch {
 		version.Deneb:   b.DenebForkEpoch,
 		version.Electra: b.ElectraForkEpoch,
 		version.Fulu:    b.FuluForkEpoch,
+		version.Gloas:   b.GloasForkEpoch,
 	}
 }

View File

@@ -224,6 +224,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
 		fmt.Sprintf("ELECTRA_FORK_VERSION: %#x", cfg.ElectraForkVersion),
 		fmt.Sprintf("FULU_FORK_EPOCH: %d", cfg.FuluForkEpoch),
 		fmt.Sprintf("FULU_FORK_VERSION: %#x", cfg.FuluForkVersion),
+		fmt.Sprintf("GLOAS_FORK_EPOCH: %d", cfg.GloasForkEpoch),
 		fmt.Sprintf("EPOCHS_PER_SUBNET_SUBSCRIPTION: %d", cfg.EpochsPerSubnetSubscription),
 		fmt.Sprintf("ATTESTATION_SUBNET_EXTRA_BITS: %d", cfg.AttestationSubnetExtraBits),
 		fmt.Sprintf("ATTESTATION_SUBNET_PREFIX_BITS: %d", cfg.AttestationSubnetPrefixBits),
@@ -246,6 +247,10 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
 		fmt.Sprintf("AGGREGATE_DUE_BPS: %d", cfg.AggregateDueBPS),
 		fmt.Sprintf("SYNC_MESSAGE_DUE_BPS: %d", cfg.SyncMessageDueBPS),
 		fmt.Sprintf("CONTRIBUTION_DUE_BPS: %d", cfg.ContributionDueBPS),
+		fmt.Sprintf("ATTESTATION_DUE_BPS_GLOAS: %d", cfg.AttestationDueBPSGloas),
+		fmt.Sprintf("AGGREGATE_DUE_BPS_GLOAS: %d", cfg.AggregateDueBPSGloas),
+		fmt.Sprintf("SYNC_MESSAGE_DUE_BPS_GLOAS: %d", cfg.SyncMessageDueBPSGloas),
+		fmt.Sprintf("CONTRIBUTION_DUE_BPS_GLOAS: %d", cfg.ContributionDueBPSGloas),
 	}
 	if len(cfg.BlobSchedule) > 0 {

View File

@@ -24,12 +24,9 @@ import (
 // These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
 // IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
 var placeholderFields = []string{
-	"AGGREGATE_DUE_BPS_GLOAS",
 	"ATTESTATION_DEADLINE",
-	"ATTESTATION_DUE_BPS_GLOAS",
 	"BLOB_SIDECAR_SUBNET_COUNT_FULU",
 	"CELLS_PER_EXT_BLOB",
-	"CONTRIBUTION_DUE_BPS_GLOAS",
 	"EIP6110_FORK_EPOCH",
 	"EIP6110_FORK_VERSION",
 	"EIP7002_FORK_EPOCH",
@@ -45,7 +42,6 @@ var placeholderFields = []string{
 	"EPOCHS_PER_SHUFFLING_PHASE",
 	"FIELD_ELEMENTS_PER_CELL",     // Configured as a constant in config/fieldparams/mainnet.go
 	"FIELD_ELEMENTS_PER_EXT_BLOB", // Configured in proto/ssz_proto_library.bzl
-	"GLOAS_FORK_EPOCH",
 	"GLOAS_FORK_VERSION",
 	"INCLUSION_LIST_SUBMISSION_DEADLINE",
 	"INCLUSION_LIST_SUBMISSION_DUE_BPS",
@@ -60,7 +56,6 @@ var placeholderFields = []string{
 	"PROPOSER_INCLUSION_LIST_CUTOFF",
 	"PROPOSER_INCLUSION_LIST_CUTOFF_BPS",
 	"PROPOSER_SELECTION_GAP",
-	"SYNC_MESSAGE_DUE_BPS_GLOAS",
 	"TARGET_NUMBER_OF_PEERS",
 	"UPDATE_TIMEOUT",
 	"VIEW_FREEZE_CUTOFF_BPS",

View File

@@ -32,6 +32,8 @@ const (
 	mainnetElectraForkEpoch = 364032 // May 7, 2025, 10:05:11 UTC
 	// Fulu Fork Epoch for mainnet config
 	mainnetFuluForkEpoch = 411392 // December 3, 2025, 09:49:11pm UTC
+	// Gloas Fork Epoch for mainnet config
+	mainnetGloasForkEpoch = math.MaxUint64
 )
 var mainnetNetworkConfig = &NetworkConfig{
@@ -121,11 +123,15 @@ var mainnetBeaconConfig = &BeaconChainConfig{
 	IntervalsPerSlot: 3,
 	// Time-based protocol parameters.
-	ProposerReorgCutoffBPS: primitives.BP(1667),
-	AttestationDueBPS:      primitives.BP(3333),
-	AggregateDueBPS:        primitives.BP(6667),
-	SyncMessageDueBPS:      primitives.BP(3333),
-	ContributionDueBPS:     primitives.BP(6667),
+	ProposerReorgCutoffBPS:  primitives.BP(1667),
+	AttestationDueBPS:       primitives.BP(3333),
+	AggregateDueBPS:         primitives.BP(6667),
+	SyncMessageDueBPS:       primitives.BP(3333),
+	ContributionDueBPS:      primitives.BP(6667),
+	AttestationDueBPSGloas:  primitives.BP(2500),
+	AggregateDueBPSGloas:    primitives.BP(5000),
+	SyncMessageDueBPSGloas:  primitives.BP(2500),
+	ContributionDueBPSGloas: primitives.BP(5000),
 	// Ethereum PoW parameters.
 	DepositChainID: 1, // Chain ID of eth1 mainnet.
@@ -235,6 +241,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
 	ElectraForkEpoch: mainnetElectraForkEpoch,
 	FuluForkVersion:  []byte{6, 0, 0, 0},
 	FuluForkEpoch:    mainnetFuluForkEpoch,
+	GloasForkEpoch:   mainnetGloasForkEpoch,
 	// New values introduced in Altair hard fork 1.
 	// Participation flag indices.
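
For a sense of scale: a basis point is one ten-thousandth of the slot, so converting a BPS value to wall-clock time is a single multiply-and-divide (this sketch assumes the straightforward `duration * bps / 10000` conversion behind the `SlotComponentDuration` helper the tests below call):

```go
package main

import (
	"fmt"
	"time"
)

// slotComponent converts basis points (ten-thousandths of the slot) into a
// wall-clock offset; assumed to mirror what SlotComponentDuration computes.
func slotComponent(slotDuration time.Duration, bps uint64) time.Duration {
	return slotDuration * time.Duration(bps) / 10000
}

func main() {
	slot := 12 * time.Second // mainnet slot duration
	fmt.Println(slotComponent(slot, 3333)) // 3.9996s: pre-Gloas attestation due (~1/3 slot)
	fmt.Println(slotComponent(slot, 2500)) // 3s: Gloas attestation due (1/4 slot)
	fmt.Println(slotComponent(slot, 6667)) // 8.0004s: pre-Gloas aggregate due (~2/3 slot)
	fmt.Println(slotComponent(slot, 5000)) // 6s: Gloas aggregate due (1/2 slot)
}
```

In other words, the Gloas values pull the attestation deadline from one third to one quarter of the slot, and the aggregate deadline from two thirds to one half.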

View File

@@ -35,6 +35,10 @@ func MinimalSpecConfig() *BeaconChainConfig {
 	// Time parameters
 	minimalConfig.SecondsPerSlot = 6
 	minimalConfig.SlotDurationMilliseconds = 6000
+	minimalConfig.AttestationDueBPSGloas = 2500
+	minimalConfig.AggregateDueBPSGloas = 5000
+	minimalConfig.SyncMessageDueBPSGloas = 2500
+	minimalConfig.ContributionDueBPSGloas = 5000
 	minimalConfig.MinAttestationInclusionDelay = 1
 	minimalConfig.SlotsPerEpoch = 8
 	minimalConfig.SqrRootSlotsPerEpoch = 2
@@ -98,6 +102,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
 	minimalConfig.ElectraForkEpoch = math.MaxUint64
 	minimalConfig.FuluForkVersion = []byte{6, 0, 0, 1}
 	minimalConfig.FuluForkEpoch = math.MaxUint64
+	minimalConfig.GloasForkEpoch = minimalConfig.FarFutureEpoch
 	minimalConfig.SyncCommitteeSize = 32
 	minimalConfig.InactivityScoreBias = 4

View File

@@ -1,7 +1,7 @@
 diff -urN a/BUILD.bazel b/BUILD.bazel
 --- a/BUILD.bazel	1969-12-31 18:00:00.000000000 -0600
 +++ b/BUILD.bazel	2025-01-05 12:00:00.000000000 -0600
-@@ -0,0 +1,89 @@
+@@ -0,0 +1,90 @@
 +load("@io_bazel_rules_go//go:def.bzl", "go_library")
 +
 +go_library(
@@ -32,6 +32,7 @@ diff -urN a/BUILD.bazel b/BUILD.bazel
 +        ],
 +        "@io_bazel_rules_go//go/platform:darwin_amd64": [
 +            "bindings_darwin_amd64.go",
++            "wrapper_darwin_amd64.s",
 +        ],
 +        "//conditions:default": [],
 +    }),

View File

@@ -66,7 +66,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
 	// As specified in spec, an aggregator should wait until two thirds of the way through slot
 	// to broadcast the best aggregate to the global aggregate channel.
 	// https://github.com/ethereum/consensus-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#broadcast-aggregate
-	v.waitToSlotTwoThirds(ctx, slot)
+	v.waitUntilAggregateDue(ctx, slot)
 	// In a DV setup, selection proofs need to be agreed upon by the DV.
 	// Checking for selection proofs at slot 0 of the epoch will result in an error, as the call to the DV executes slower than the start of this function.
@@ -203,11 +203,18 @@ func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fiel
 	return sig.Marshal(), nil
 }
-// waitToSlotTwoThirds waits until two third through the current slot period
-// such that any attestations from this slot have time to reach the beacon node
-// before creating the aggregated attestation.
-func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slot) {
-	v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregateDueBPS)
+// waitUntilAggregateDue waits until the configured aggregation due time within the current slot
+// such that any attestations from this slot have time to reach the beacon node before creating
+// the aggregated attestation.
+//
+// Note: Historically this was ~2/3 of the slot, but may differ across forks (e.g. Gloas).
+func (v *validator) waitUntilAggregateDue(ctx context.Context, slot primitives.Slot) {
+	cfg := params.BeaconConfig()
+	component := cfg.AggregateDueBPS
+	if slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
+		component = cfg.AggregateDueBPSGloas
+	}
+	v.waitUntilSlotComponent(ctx, slot, component)
 }
 // This returns the signature of validator signing over aggregate and

View File

@@ -260,7 +260,7 @@ func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) {
 		timeToSleep := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AggregateDueBPS)
 		twoThirdTime := currentTime.Add(timeToSleep)
-		validator.waitToSlotTwoThirds(t.Context(), numOfSlots)
+		validator.waitUntilAggregateDue(t.Context(), numOfSlots)
 		currentTime = time.Now()
 		assert.Equal(t, twoThirdTime.Unix(), currentTime.Unix())
 	})
@@ -280,7 +280,7 @@ func TestWaitForSlotTwoThird_DoneContext_ReturnsImmediately(t *testing.T) {
 		expectedTime := time.Now()
 		ctx, cancel := context.WithCancel(t.Context())
 		cancel()
-		validator.waitToSlotTwoThirds(ctx, numOfSlots)
+		validator.waitUntilAggregateDue(ctx, numOfSlots)
 		currentTime = time.Now()
 		assert.Equal(t, expectedTime.Unix(), currentTime.Unix())
 	})

View File

@@ -37,7 +37,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
 	defer span.End()
 	span.SetAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
-	v.waitOneThirdOrValidBlock(ctx, slot)
+	v.waitUntilAttestationDueOrValidBlock(ctx, slot)
 	var b strings.Builder
 	if err := b.WriteByte(byte(iface.RoleAttester)); err != nil {
@@ -259,12 +259,12 @@ func (v *validator) setHighestSlot(slot primitives.Slot) {
 	}
 }
-// waitOneThirdOrValidBlock waits until (a) or (b) whichever comes first:
+// waitUntilAttestationDueOrValidBlock waits until (a) or (b) whichever comes first:
 //
 // (a) the validator has received a valid block that is the same slot as input slot
-// (b) one-third of the slot has transpired (SECONDS_PER_SLOT / 3 seconds after the start of slot)
-func (v *validator) waitOneThirdOrValidBlock(ctx context.Context, slot primitives.Slot) {
-	ctx, span := trace.StartSpan(ctx, "validator.waitOneThirdOrValidBlock")
+// (b) the configured attestation due time has transpired (as basis points of the slot duration)
+func (v *validator) waitUntilAttestationDueOrValidBlock(ctx context.Context, slot primitives.Slot) {
+	ctx, span := trace.StartSpan(ctx, "validator.waitUntilAttestationDueOrValidBlock")
 	defer span.End()
 	// Don't need to wait if requested slot is the same as highest valid slot.
@@ -272,7 +272,12 @@ func (v *validator) waitOneThirdOrValidBlock(ctx context.Context, slot primitive
 		return
 	}
-	finalTime, err := v.slotComponentDeadline(slot, params.BeaconConfig().AttestationDueBPS)
+	cfg := params.BeaconConfig()
+	component := cfg.AttestationDueBPS
+	if slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
+		component = cfg.AttestationDueBPSGloas
+	}
+	finalTime, err := v.slotComponentDeadline(slot, component)
 	if err != nil {
 		log.WithError(err).WithField("slot", slot).Error("Slot overflows, unable to wait for attestation deadline")
 		return

View File

@@ -706,7 +706,7 @@ func TestServer_WaitToSlotOneThird_CanWait(t *testing.T) {
 	timeToSleep := params.BeaconConfig().SecondsPerSlot / 3
 	oneThird := currentTime.Add(time.Duration(timeToSleep) * time.Second)
-	v.waitOneThirdOrValidBlock(t.Context(), currentSlot)
+	v.waitUntilAttestationDueOrValidBlock(t.Context(), currentSlot)
 	if oneThird.Sub(time.Now()) > 10*time.Millisecond { // Allow for small diff due to execution time.
 		t.Errorf("Wanted %s time for slot one third but got %s", oneThird, currentTime)
@@ -724,7 +724,7 @@ func TestServer_WaitToSlotOneThird_SameReqSlot(t *testing.T) {
 		highestValidSlot: currentSlot,
 	}
-	v.waitOneThirdOrValidBlock(t.Context(), currentSlot)
+	v.waitUntilAttestationDueOrValidBlock(t.Context(), currentSlot)
 	if currentTime.Sub(time.Now()) > 10*time.Millisecond { // Allow for small diff due to execution time.
 		t.Errorf("Wanted %s time for slot one third but got %s", time.Now(), currentTime)
@@ -750,7 +750,7 @@ func TestServer_WaitToSlotOneThird_ReceiveBlockSlot(t *testing.T) {
 		v.slotFeed.Send(currentSlot)
 	})
-	v.waitOneThirdOrValidBlock(t.Context(), currentSlot)
+	v.waitUntilAttestationDueOrValidBlock(t.Context(), currentSlot)
 	if currentTime.Sub(time.Now()) > 10*time.Millisecond { // Allow for small diff due to execution time.
 		t.Errorf("Wanted %s time for slot one third but got %s", time.Now(), currentTime)

View File

@@ -29,7 +29,7 @@ func (v *validator) SubmitSyncCommitteeMessage(ctx context.Context, slot primiti
 	defer span.End()
 	span.SetAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
-	v.waitOneThirdOrValidBlock(ctx, slot)
+	v.waitUntilAttestationDueOrValidBlock(ctx, slot)
 	res, err := v.validatorClient.SyncMessageBlockRoot(ctx, &emptypb.Empty{})
 	if err != nil {
@@ -127,7 +127,12 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot p
 		return
 	}
-	v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().ContributionDueBPS)
+	cfg := params.BeaconConfig()
+	component := cfg.ContributionDueBPS
+	if slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
+		component = cfg.ContributionDueBPSGloas
+	}
+	v.waitUntilSlotComponent(ctx, slot, component)
 	coveredSubnets := make(map[uint64]bool)
 	for i, comIdx := range indexRes.Indices {

View File

@@ -51,12 +51,20 @@ func (v *validator) slotComponentSpanName(component primitives.BP) string {
 	switch component {
 	case cfg.AttestationDueBPS:
 		return "validator.waitAttestationWindow"
+	case cfg.AttestationDueBPSGloas:
+		return "validator.waitAttestationWindow"
 	case cfg.AggregateDueBPS:
 		return "validator.waitAggregateWindow"
+	case cfg.AggregateDueBPSGloas:
+		return "validator.waitAggregateWindow"
 	case cfg.SyncMessageDueBPS:
 		return "validator.waitSyncMessageWindow"
+	case cfg.SyncMessageDueBPSGloas:
+		return "validator.waitSyncMessageWindow"
 	case cfg.ContributionDueBPS:
 		return "validator.waitContributionWindow"
+	case cfg.ContributionDueBPSGloas:
+		return "validator.waitContributionWindow"
 	case cfg.ProposerReorgCutoffBPS:
 		return "validator.waitProposerReorgWindow"
 	default: