mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: fulu-p2p-r ... sleep-with (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 1aab9ef91c | |
| | b24f1369a3 | |
| | d3497576a5 | |
| | 02cf25e32b | |
@@ -79,7 +79,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
 			log.WithError(err).Error("Giving up waiting for genesis time")
 			return
 		}
-		time.Sleep(1 * time.Second)
+		helpers.Sleep(s.ctx, 1*time.Second)
 	}
 	log.Warn("Genesis time received, now available to process attestations")
 }
@@ -472,8 +472,8 @@ func (s *Service) removeStartupState() {
 func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
 	isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
 
-	beaconConfig := params.BeaconConfig()
-	custodyRequirement := beaconConfig.CustodyRequirement
+	cfg := params.BeaconConfig()
+	custodyRequirement := cfg.CustodyRequirement
 
 	// Check if the node was previously subscribed to all data subnets, and if so,
 	// store the new status accordingly.
@@ -493,7 +493,7 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
 	// Compute the custody group count.
 	custodyGroupCount := custodyRequirement
 	if isSubscribedToAllDataSubnets {
-		custodyGroupCount = beaconConfig.NumberOfColumns
+		custodyGroupCount = cfg.NumberOfColumns
 	}
 
 	// Safely compute the fulu fork slot.
@@ -536,11 +536,11 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
 }
 
 func fuluForkSlot() (primitives.Slot, error) {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
-	fuluForkEpoch := beaconConfig.FuluForkEpoch
-	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
-		return beaconConfig.FarFutureSlot, nil
+	fuluForkEpoch := cfg.FuluForkEpoch
+	if fuluForkEpoch == cfg.FarFutureEpoch {
+		return cfg.FarFutureSlot, nil
 	}
 
 	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
@@ -14,6 +14,7 @@ go_library(
         "rewards_penalties.go",
         "shuffle.go",
         "sync_committee.go",
+        "time.go",
        "validator_churn.go",
         "validators.go",
         "weak_subjectivity.go",
@@ -61,6 +62,7 @@ go_test(
         "rewards_penalties_test.go",
         "shuffle_test.go",
         "sync_committee_test.go",
+        "time_test.go",
        "validator_churn_test.go",
         "validators_test.go",
         "weak_subjectivity_test.go",
beacon-chain/core/helpers/time.go (new file, 14 lines)
@@ -0,0 +1,14 @@
+package helpers
+
+import (
+	"context"
+	"time"
+)
+
+// Sleep sleeps for the given duration or until the context is done.
+func Sleep(ctx context.Context, duration time.Duration) {
+	select {
+	case <-ctx.Done():
+	case <-time.After(duration):
+	}
+}
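The helper is a drop-in replacement for time.Sleep in loops that must exit promptly on shutdown: a plain time.Sleep blocks the goroutine for the full duration even after the node's context is cancelled, while the select above returns as soon as ctx.Done() fires. A minimal, self-contained sketch of that behavior (the polling loop and the local sleep name are illustrative, not from this PR):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sleep mirrors helpers.Sleep: it returns after the duration elapses or
// as soon as the context is cancelled, whichever comes first.
func sleep(ctx context.Context, d time.Duration) {
	select {
	case <-ctx.Done():
	case <-time.After(d):
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Cancel after 50ms to stand in for a node shutting down mid-poll.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel()
	}()

	start := time.Now()
	for ctx.Err() == nil {
		// Hypothetical polling work would go here.
		sleep(ctx, 1*time.Second) // returns early once ctx is cancelled
	}
	fmt.Printf("loop exited after %v\n", time.Since(start).Round(10*time.Millisecond))
}
```

One trade-off of the time.After form: the underlying timer is not stopped on early exit and is only reclaimed when it fires, which is usually acceptable for short durations in long-lived loops.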
beacon-chain/core/helpers/time_test.go (new file, 22 lines)
@@ -0,0 +1,22 @@
+package helpers_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
+)
+
+func TestSleep(t *testing.T) {
+	t.Run("context cancelled", func(t *testing.T) {
+		ctx, cancel := context.WithCancel(t.Context())
+		cancel()
+
+		helpers.Sleep(ctx, 1*time.Hour)
+	})
+
+	t.Run("Nominal", func(t *testing.T) {
+		helpers.Sleep(t.Context(), 0)
+	})
+}
@@ -401,7 +401,7 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
 		return 0, errors.New("empty active indices list")
 	}
 	hashFunc := hash.CustomSHA256Hasher()
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 	seedBuffer := make([]byte, len(seed)+8)
 	copy(seedBuffer, seed[:])
 
@@ -426,14 +426,14 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
 			offset := (i % 16) * 2
 			randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
 
-			if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
+			if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
 				return candidateIndex, nil
 			}
 		} else {
 			binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
 			randomByte := hashFunc(seedBuffer)[i%32]
 
-			if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
+			if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
 				return candidateIndex, nil
 			}
 		}
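Both branches are the same balance-weighted rejection sampling: a candidate is accepted with probability effectiveBal / MaxEffectiveBalance, drawn from a 16-bit random value after Electra and a single random byte before. A self-contained sketch of the Electra-style acceptance test; the constants are stand-ins for fieldparams.MaxRandomValueElectra and cfg.MaxEffectiveBalanceElectra (assumed here to be 2^16-1 and 2048 ETH in Gwei):

```go
package main

import (
	"fmt"
	"math/rand"
)

const (
	maxRandomValue      = 1<<16 - 1          // stand-in for fieldparams.MaxRandomValueElectra
	maxEffectiveBalance = 2_048_000_000_000  // stand-in for cfg.MaxEffectiveBalanceElectra (Gwei)
)

// accept reports whether a candidate with the given effective balance
// passes the rejection-sampling test for one random draw.
func accept(effectiveBal, randomValue uint64) bool {
	return effectiveBal*maxRandomValue >= maxEffectiveBalance*randomValue
}

func main() {
	// A validator at half the max balance should be accepted ~50% of the time.
	effectiveBal := uint64(1_024_000_000_000)
	hits := 0
	const trials = 100_000
	for i := 0; i < trials; i++ {
		if accept(effectiveBal, uint64(rand.Intn(maxRandomValue+1))) {
			hits++
		}
	}
	fmt.Printf("acceptance rate: %.3f\n", float64(hits)/trials)
}
```

At half the maximum balance the printed acceptance rate should hover around 0.5, which is exactly the weighting the sampling intends.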
@@ -89,14 +89,14 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
 // ComputeColumnsForCustodyGroup computes the columns for a given custody group.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
 func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
-	beaconConfig := params.BeaconConfig()
-	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
+	cfg := params.BeaconConfig()
+	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
 
 	if custodyGroup >= numberOfCustodyGroups {
 		return nil, ErrCustodyGroupTooLarge
 	}
 
-	numberOfColumns := beaconConfig.NumberOfColumns
+	numberOfColumns := cfg.NumberOfColumns
 
 	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
 
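Per the linked das-core spec, group g custodies the columnsPerGroup column indices congruent to g modulo the number of custody groups. A sketch of that mapping (a reading of the spec function, not code from this PR; the values in main are illustrative):

```go
package main

import "fmt"

// columnsForCustodyGroup is a sketch of compute_columns_for_custody_group
// from the linked das-core spec: custody group g owns every column index
// congruent to g modulo the number of custody groups.
func columnsForCustodyGroup(group, numberOfColumns, numberOfCustodyGroups uint64) []uint64 {
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
	columns := make([]uint64, 0, columnsPerGroup)
	for i := uint64(0); i < columnsPerGroup; i++ {
		columns = append(columns, i*numberOfCustodyGroups+group)
	}
	return columns
}

func main() {
	// With 128 columns and 32 custody groups (illustrative values), group 3
	// custodies columns 3, 35, 67, 99.
	fmt.Println(columnsForCustodyGroup(3, 128, 32))
}
```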
@@ -112,9 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 // ComputeCustodyGroupForColumn computes the custody group for a given column.
 // It is the reciprocal function of ComputeColumnsForCustodyGroup.
 func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
-	beaconConfig := params.BeaconConfig()
-	numberOfColumns := beaconConfig.NumberOfColumns
-	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
+	cfg := params.BeaconConfig()
+	numberOfColumns := cfg.NumberOfColumns
+	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
 
 	if columnIndex >= numberOfColumns {
 		return 0, ErrIndexTooLarge
@@ -84,10 +84,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
 		totalNodeBalance += validator.EffectiveBalance()
 	}
 
-	beaconConfig := params.BeaconConfig()
-	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
-	validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
-	balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
+	cfg := params.BeaconConfig()
+	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
+	validatorCustodyRequirement := cfg.ValidatorCustodyRequirement
+	balancePerAdditionalCustodyGroup := cfg.BalancePerAdditionalCustodyGroup
 
 	count := totalNodeBalance / balancePerAdditionalCustodyGroup
 	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
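The return line is a clamp: the balance-derived count is floored at ValidatorCustodyRequirement and capped at NumberOfCustodyGroups. A small sketch with illustrative numbers, using the Go 1.21+ builtin min/max that the diff itself relies on:

```go
package main

import "fmt"

// clampCustody mirrors the return expression above: the balance-derived
// count is floored at the validator custody requirement and capped at the
// total number of custody groups.
func clampCustody(count, validatorRequirement, numberOfGroups uint64) uint64 {
	return min(max(count, validatorRequirement), numberOfGroups)
}

func main() {
	// Illustrative values only: floor of 8, cap of 128.
	fmt.Println(clampCustody(2, 8, 128))   // 8: below the floor
	fmt.Println(clampCustody(40, 8, 128))  // 40: within range
	fmt.Println(clampCustody(500, 8, 128)) // 128: capped
}
```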
@@ -196,7 +196,7 @@ func TestAltairCompatible(t *testing.T) {
 }
 
 func TestCanUpgradeTo(t *testing.T) {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
 	outerTestCases := []struct {
 		name string
@@ -205,32 +205,32 @@ func TestCanUpgradeTo(t *testing.T) {
 	}{
 		{
 			name:        "Altair",
-			forkEpoch:   &beaconConfig.AltairForkEpoch,
+			forkEpoch:   &cfg.AltairForkEpoch,
 			upgradeFunc: time.CanUpgradeToAltair,
 		},
 		{
 			name:        "Bellatrix",
-			forkEpoch:   &beaconConfig.BellatrixForkEpoch,
+			forkEpoch:   &cfg.BellatrixForkEpoch,
 			upgradeFunc: time.CanUpgradeToBellatrix,
 		},
 		{
 			name:        "Capella",
-			forkEpoch:   &beaconConfig.CapellaForkEpoch,
+			forkEpoch:   &cfg.CapellaForkEpoch,
 			upgradeFunc: time.CanUpgradeToCapella,
 		},
 		{
 			name:        "Deneb",
-			forkEpoch:   &beaconConfig.DenebForkEpoch,
+			forkEpoch:   &cfg.DenebForkEpoch,
 			upgradeFunc: time.CanUpgradeToDeneb,
 		},
 		{
 			name:        "Electra",
-			forkEpoch:   &beaconConfig.ElectraForkEpoch,
+			forkEpoch:   &cfg.ElectraForkEpoch,
 			upgradeFunc: time.CanUpgradeToElectra,
 		},
 		{
 			name:        "Fulu",
-			forkEpoch:   &beaconConfig.FuluForkEpoch,
+			forkEpoch:   &cfg.FuluForkEpoch,
 			upgradeFunc: time.CanUpgradeToFulu,
 		},
 	}
@@ -238,7 +238,7 @@ func TestCanUpgradeTo(t *testing.T) {
 	for _, otc := range outerTestCases {
 		params.SetupTestConfigCleanup(t)
 		*otc.forkEpoch = 5
-		params.OverrideBeaconConfig(beaconConfig)
+		params.OverrideBeaconConfig(cfg)
 
 		innerTestCases := []struct {
 			name string
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	contracts "github.com/OffchainLabs/prysm/v6/contracts/deposit"
 	"github.com/OffchainLabs/prysm/v6/io/logs"
@@ -98,7 +99,7 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)
 	s.runError = errors.Wrap(err, "retryExecutionClientConnection")
 	s.updateConnectedETH1(false)
 	// Back off for a while before redialing.
-	time.Sleep(backOffPeriod)
+	helpers.Sleep(ctx, backOffPeriod)
 	currClient := s.rpcClient
 	if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
 		s.runError = errors.Wrap(err, "setupExecutionClientConnections")
@@ -208,11 +208,11 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
 }
 
 func fuluForkSlot() (primitives.Slot, error) {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
-	fuluForkEpoch := beaconConfig.FuluForkEpoch
-	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
-		return beaconConfig.FarFutureSlot, nil
+	fuluForkEpoch := cfg.FuluForkEpoch
+	if fuluForkEpoch == cfg.FarFutureEpoch {
+		return cfg.FarFutureSlot, nil
 	}
 
 	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v6/config/features"
@@ -348,7 +349,7 @@ func (s *Service) listenForNewNodes() {
 		if s.isPeerAtLimit(all) {
 			// Pause the main loop for a period to stop looking for new peers.
 			log.Trace("Not looking for peers, at peer limit")
-			time.Sleep(pollingPeriod)
+			helpers.Sleep(s.ctx, pollingPeriod)
 			continue
 		}
 
@@ -7,6 +7,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
 	prysmTime "github.com/OffchainLabs/prysm/v6/time"
@@ -164,7 +165,7 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
 				currentTime := prysmTime.Now()
 
 				// Wait for peer to initiate handshake
-				time.Sleep(timeForStatus)
+				helpers.Sleep(s.ctx, timeForStatus)
 
 				// Exit if we are disconnected with the peer.
 				if s.host.Network().Connectedness(remotePeer) != network.Connected {
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
 	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -94,7 +95,7 @@ func (s *Service) PublishToTopic(ctx context.Context, topic string, data []byte,
 		case <-ctx.Done():
 			return errors.Wrapf(ctx.Err(), "unable to find requisite number of peers for topic %s, 0 peers found to publish to", topic)
 		default:
-			time.Sleep(100 * time.Millisecond)
+			helpers.Sleep(ctx, 100*time.Millisecond)
 		}
 	}
 }
@@ -345,17 +345,17 @@ func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
 		return "", errors.Errorf("%s: %s", invalidRPCMessageType, msg)
 	}
 
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
 	// Check if the message is to be updated in fulu.
-	if epoch >= beaconConfig.FuluForkEpoch {
+	if epoch >= cfg.FuluForkEpoch {
 		if version, ok := fuluMapping[msg]; ok {
 			return protocolPrefix + msg + version, nil
 		}
 	}
 
 	// Check if the message is to be updated in altair.
-	if epoch >= beaconConfig.AltairForkEpoch {
+	if epoch >= cfg.AltairForkEpoch {
 		if version, ok := altairMapping[msg]; ok {
 			return protocolPrefix + msg + version, nil
 		}
@@ -514,18 +514,18 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
 //
 // return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
 func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
 	if flags.Get().SubscribeToAllSubnets {
-		subnets := make([]uint64, 0, beaconConfig.AttestationSubnetCount)
-		for i := range beaconConfig.AttestationSubnetCount {
+		subnets := make([]uint64, 0, cfg.AttestationSubnetCount)
+		for i := range cfg.AttestationSubnetCount {
 			subnets = append(subnets, i)
 		}
 		return subnets, nil
 	}
 
-	subnets := make([]uint64, 0, beaconConfig.SubnetsPerNode)
-	for i := range beaconConfig.SubnetsPerNode {
+	subnets := make([]uint64, 0, cfg.SubnetsPerNode)
+	for i := range cfg.SubnetsPerNode {
 		sub, err := computeSubscribedSubnet(nodeID, epoch, i)
 		if err != nil {
 			return nil, errors.Wrap(err, "compute subscribed subnet")
@@ -524,12 +524,12 @@ func TestSubnetComputation(t *testing.T) {
 	require.NoError(t, err)
 
 	localNode := enode.NewLocalNode(db, convertedKey)
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
 	t.Run("standard", func(t *testing.T) {
 		retrievedSubnets, err := computeSubscribedSubnets(localNode.ID(), 1000)
 		require.NoError(t, err)
-		require.Equal(t, beaconConfig.SubnetsPerNode, uint64(len(retrievedSubnets)))
+		require.Equal(t, cfg.SubnetsPerNode, uint64(len(retrievedSubnets)))
 		require.Equal(t, retrievedSubnets[0]+1, retrievedSubnets[1])
 	})
@@ -541,8 +541,8 @@ func TestSubnetComputation(t *testing.T) {
 
 		retrievedSubnets, err := computeSubscribedSubnets(localNode.ID(), 1000)
 		require.NoError(t, err)
-		require.Equal(t, beaconConfig.AttestationSubnetCount, uint64(len(retrievedSubnets)))
-		for i := range beaconConfig.AttestationSubnetCount {
+		require.Equal(t, cfg.AttestationSubnetCount, uint64(len(retrievedSubnets)))
+		for i := range cfg.AttestationSubnetCount {
 			require.Equal(t, i, retrievedSubnets[i])
 		}
 	})
@@ -19,6 +19,7 @@ go_library(
         "//testing:__subpackages__",
     ],
     deps = [
+        "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
@@ -11,6 +11,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
@@ -174,7 +175,6 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
 	// PubSub requires some delay after connecting for the (*PubSub).processLoop method to
 	// pick up the newly connected peer.
 	time.Sleep(time.Millisecond * 100)
-
 	castedMsg, ok := msg.(ssz.Marshaler)
 	if !ok {
 		p.t.Fatalf("%T doesn't support ssz marshaler", msg)
@@ -403,7 +403,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
 	}
 	// Delay returning the stream for testing purposes
 	if p.DelaySend {
-		time.Sleep(1 * time.Second)
+		helpers.Sleep(ctx, 1*time.Second)
 	}
 
 	return stream, nil
@@ -90,10 +90,10 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
 // custodyGroupCount computes the custody group count based on the custody requirement,
 // the validators custody requirement, and whether the node is subscribed to all data subnets.
 func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
 	if flags.Get().SubscribeAllDataSubnets {
-		return beaconConfig.NumberOfCustodyGroups, nil
+		return cfg.NumberOfCustodyGroups, nil
 	}
 
 	validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
@@ -101,7 +101,7 @@ func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
 		return 0, errors.Wrap(err, "validators custody requirement")
 	}
 
-	return max(beaconConfig.CustodyRequirement, validatorsCustodyRequirement), nil
+	return max(cfg.CustodyRequirement, validatorsCustodyRequirement), nil
 }
 
 // validatorsCustodyRequirements computes the custody requirements based on the
@@ -116,11 +116,11 @@ func withSubscribeAllDataSubnets(t *testing.T, fn func()) {
 
 func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	beaconConfig := params.BeaconConfig()
-	beaconConfig.NumberOfCustodyGroups = 128
-	beaconConfig.CustodyRequirement = 4
-	beaconConfig.SamplesPerSlot = 8
-	params.OverrideBeaconConfig(beaconConfig)
+	cfg := params.BeaconConfig()
+	cfg.NumberOfCustodyGroups = 128
+	cfg.CustodyRequirement = 4
+	cfg.SamplesPerSlot = 8
+	params.OverrideBeaconConfig(cfg)
 
 	t.Run("Skip update when actual custody count >= target", func(t *testing.T) {
 		setup := setupCustodyTest(t, false)
@@ -159,7 +159,7 @@ func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
 			require.NoError(t, err)
 
 			const expectedSlot = primitives.Slot(100)
-			setup.assertCustodyInfo(t, expectedSlot, beaconConfig.NumberOfCustodyGroups)
+			setup.assertCustodyInfo(t, expectedSlot, cfg.NumberOfCustodyGroups)
 		})
 	})
 }
@@ -1133,10 +1133,7 @@ func randomPeer(
 			"delay": waitPeriod,
 		}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
 
-		select {
-		case <-time.After(waitPeriod):
-		case <-ctx.Done():
-		}
+		helpers.Sleep(ctx, waitPeriod)
 	}
 
 	return "", ctx.Err()
@@ -6,6 +6,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
 	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -85,7 +86,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
 		log.WithFields(logrus.Fields{
 			"suitable": len(peers),
 			"required": required}).Info("Waiting for enough suitable peers before syncing")
-		time.Sleep(handshakePollingInterval)
+		helpers.Sleep(ctx, handshakePollingInterval)
 	}
 }
@@ -1366,16 +1366,16 @@ func TestFetchSidecars(t *testing.T) {
 	})
 
 	t.Run("Nominal", func(t *testing.T) {
-		beaconConfig := params.BeaconConfig()
-		numberOfColumns := beaconConfig.NumberOfColumns
-		samplesPerSlot := beaconConfig.SamplesPerSlot
+		cfg := params.BeaconConfig()
+		numberOfColumns := cfg.NumberOfColumns
+		samplesPerSlot := cfg.SamplesPerSlot
 
 		// Define "now" to be one epoch after genesis time + retention period.
 		genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
-		secondsPerSlot := beaconConfig.SecondsPerSlot
-		slotsPerEpoch := beaconConfig.SlotsPerEpoch
+		secondsPerSlot := cfg.SecondsPerSlot
+		slotsPerEpoch := cfg.SlotsPerEpoch
 		secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
-		retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
+		retentionEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
 		nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
 		now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -235,7 +236,7 @@ func (q *blocksQueue) loop() {
 			} else {
 				q.exitConditions.noRequiredPeersErrRetries++
 				log.Debug("Waiting for finalized peers")
-				time.Sleep(noRequiredPeersErrRefreshInterval)
+				helpers.Sleep(q.ctx, noRequiredPeersErrRefreshInterval)
 			}
 			continue
 		}
@@ -322,7 +322,7 @@ func (s *Service) waitForMinimumPeers() ([]peer.ID, error) {
 		log.WithFields(logrus.Fields{
 			"suitable": len(peers),
 			"required": required,
 		}).Info("Waiting for enough suitable peers before syncing")
-		time.Sleep(handshakePollingInterval)
+		helpers.Sleep(s.ctx, handshakePollingInterval)
 	}
 }
@@ -530,12 +530,12 @@ func TestOriginOutsideRetention(t *testing.T) {
 func TestFetchOriginSidecars(t *testing.T) {
 	ctx := t.Context()
 
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 	genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
-	secondsPerSlot := beaconConfig.SecondsPerSlot
-	slotsPerEpoch := beaconConfig.SlotsPerEpoch
+	secondsPerSlot := cfg.SecondsPerSlot
+	slotsPerEpoch := cfg.SlotsPerEpoch
 	secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
-	retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
+	retentionEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
 
 	genesisValidatorRoot := [fieldparams.RootLength]byte{}
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
 	p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
 	"github.com/OffchainLabs/prysm/v6/config/features"
@@ -194,7 +195,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
 		// https://github.com/quic-go/quic-go/issues/3291
 		defer func() {
 			if strings.Contains(stream.Conn().RemoteMultiaddr().String(), "quic-v1") {
-				time.Sleep(2 * time.Second)
+				helpers.Sleep(s.ctx, 2*time.Second)
 			}
 
 			_err := stream.Reset()
@@ -113,19 +113,19 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
 }
 
 func validateBlobByRootRequest(blobIdents types.BlobSidecarsByRootReq, slot primitives.Slot) error {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 	epoch := slots.ToEpoch(slot)
 	blobIdentCount := uint64(len(blobIdents))
 
-	if epoch >= beaconConfig.ElectraForkEpoch {
-		if blobIdentCount > beaconConfig.MaxRequestBlobSidecarsElectra {
+	if epoch >= cfg.ElectraForkEpoch {
+		if blobIdentCount > cfg.MaxRequestBlobSidecarsElectra {
 			return types.ErrMaxBlobReqExceeded
 		}
 
 		return nil
 	}
 
-	if blobIdentCount > beaconConfig.MaxRequestBlobSidecars {
+	if blobIdentCount > cfg.MaxRequestBlobSidecars {
 		return types.ErrMaxBlobReqExceeded
 	}
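The validation picks a fork-dependent ceiling: requests at or past the Electra fork epoch are allowed the larger MaxRequestBlobSidecarsElectra, earlier ones the smaller MaxRequestBlobSidecars. A sketch of the same shape; all three constants here are illustrative stand-ins for the beacon config values:

```go
package main

import (
	"errors"
	"fmt"
)

var errMaxBlobReqExceeded = errors.New("max blob request exceeded")

// validateCount mirrors the handler's shape: the ceiling on a request
// depends on whether its epoch is at or past the Electra fork.
func validateCount(count, epoch uint64) error {
	const (
		electraForkEpoch              = 100  // illustrative
		maxRequestBlobSidecars        = 768  // pre-Electra ceiling (illustrative)
		maxRequestBlobSidecarsElectra = 1152 // post-Electra ceiling (illustrative)
	)
	limit := uint64(maxRequestBlobSidecars)
	if epoch >= electraForkEpoch {
		limit = maxRequestBlobSidecarsElectra
	}
	if count > limit {
		return errMaxBlobReqExceeded
	}
	return nil
}

func main() {
	fmt.Println(validateCount(1000, 50))  // error: over the pre-Electra ceiling
	fmt.Println(validateCount(1000, 150)) // <nil>: allowed after Electra
}
```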
@@ -38,8 +38,8 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
 	defer cancel()
 
 	SetRPCStreamDeadlines(stream)
-	beaconConfig := params.BeaconConfig()
-	maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars
+	cfg := params.BeaconConfig()
+	maxRequestDataColumnSidecars := cfg.MaxRequestDataColumnSidecars
 	remotePeer := stream.Conn().RemotePeer()
 
 	log := log.WithFields(logrus.Fields{
@@ -102,7 +102,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
 
 		// Once the quota is reached, we're done serving the request.
 		if maxRequestDataColumnSidecars == 0 {
-			log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
+			log.WithField("initialQuota", cfg.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
 			break
 		}
 	}
@@ -31,9 +31,9 @@ import (
 
 func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	beaconConfig := params.BeaconConfig()
-	beaconConfig.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(beaconConfig)
+	cfg := params.BeaconConfig()
+	cfg.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(cfg)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctx := context.Background()
 	t.Run("wrong message type", func(t *testing.T) {
@@ -163,9 +163,9 @@ func dataColumnsRPCMinValidSlot(currentSlot primitives.Slot) (primitives.Slot, e
 		return primitives.Slot(math.MaxUint64), nil
 	}
 
-	beaconConfig := params.BeaconConfig()
-	minReqEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
-	minStartEpoch := beaconConfig.FuluForkEpoch
+	cfg := params.BeaconConfig()
+	minReqEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
+	minStartEpoch := cfg.FuluForkEpoch
 
 	currEpoch := slots.ToEpoch(currentSlot)
 	if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStartEpoch {
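The visible guard implies the serving floor is the later of the Fulu fork epoch and currentEpoch minus the retention window. A sketch of that floor in epoch terms, assuming the truncated branch returns the subtraction (slot conversion and the pre-Fulu early return are elided):

```go
package main

import "fmt"

// minValidEpoch mirrors the guard above: data column sidecars are served
// back to the later of the Fulu fork epoch and (current epoch - retention
// window). This assumes the truncated branch returns the subtraction.
func minValidEpoch(currEpoch, minReqEpochs, fuluForkEpoch uint64) uint64 {
	if currEpoch > minReqEpochs && currEpoch-minReqEpochs > fuluForkEpoch {
		return currEpoch - minReqEpochs
	}
	return fuluForkEpoch
}

func main() {
	// Illustrative values: retention of 4096 epochs, Fulu fork at epoch 10.
	fmt.Println(minValidEpoch(100, 4096, 10))  // 10: fork is still within retention
	fmt.Println(minValidEpoch(5000, 4096, 10)) // 904: window has moved past the fork
}
```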
@@ -28,9 +28,9 @@ import (
 
 func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	beaconConfig := params.BeaconConfig()
-	beaconConfig.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(beaconConfig)
+	cfg := params.BeaconConfig()
+	cfg.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(cfg)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
 	require.NoError(t, err)
@@ -43,9 +43,9 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
 
 	t.Run("invalid request", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
-		beaconConfig := params.BeaconConfig()
-		beaconConfig.MaxRequestDataColumnSidecars = 1
-		params.OverrideBeaconConfig(beaconConfig)
+		cfg := params.BeaconConfig()
+		cfg.MaxRequestDataColumnSidecars = 1
+		params.OverrideBeaconConfig(cfg)
 
 		localP2P := p2ptest.NewTestP2P(t)
 		service := &Service{cfg: &config{p2p: localP2P}}
@@ -96,9 +96,9 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
 	}()
 
 	params.SetupTestConfigCleanup(t)
-	beaconConfig := params.BeaconConfig()
-	beaconConfig.FuluForkEpoch = 1
-	params.OverrideBeaconConfig(beaconConfig)
+	cfg := params.BeaconConfig()
+	cfg.FuluForkEpoch = 1
+	params.OverrideBeaconConfig(cfg)
 
 	localP2P := p2ptest.NewTestP2P(t)
 	clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
@@ -465,8 +465,8 @@ func SendDataColumnSidecarsByRangeRequest(
 		return nil, nil
 	}
 
-	beaconConfig := params.BeaconConfig()
-	numberOfColumns := beaconConfig.NumberOfColumns
+	cfg := params.BeaconConfig()
+	numberOfColumns := cfg.NumberOfColumns
 	maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
 
 	// Check if we do not request too many sidecars.
@@ -889,9 +889,9 @@ func TestErrInvalidFetchedDataDistinction(t *testing.T) {
 
 func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	beaconConfig := params.BeaconConfig()
-	beaconConfig.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(beaconConfig)
+	cfg := params.BeaconConfig()
+	cfg.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(cfg)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
 	require.NoError(t, err)
@@ -923,9 +923,9 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
 
 	t.Run("too many columns in request", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
-		beaconConfig := params.BeaconConfig()
-		beaconConfig.MaxRequestDataColumnSidecars = 0
-		params.OverrideBeaconConfig(beaconConfig)
+		cfg := params.BeaconConfig()
+		cfg.MaxRequestDataColumnSidecars = 0
+		params.OverrideBeaconConfig(cfg)
 
 		request := &ethpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
 		_, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
@@ -1193,9 +1193,9 @@ func TestIsSidecarIndexRequested(t *testing.T) {
 
 func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	beaconConfig := params.BeaconConfig()
-	beaconConfig.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(beaconConfig)
+	cfg := params.BeaconConfig()
+	cfg.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(cfg)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
 	require.NoError(t, err)
@@ -1223,9 +1223,9 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
 
 	t.Run("too many columns in request", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
-		beaconConfig := params.BeaconConfig()
-		beaconConfig.MaxRequestDataColumnSidecars = 4
-		params.OverrideBeaconConfig(beaconConfig)
+		cfg := params.BeaconConfig()
+		cfg.MaxRequestDataColumnSidecars = 4
+		params.OverrideBeaconConfig(cfg)
 
 		request := p2ptypes.DataColumnsByRootIdentifiers{
 			{Columns: []uint64{1, 2, 3}},
@@ -445,7 +445,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
 		custodyGroupCount = uint64(4)
 	)
 
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 	ctx := t.Context()
 
 	testCases := []struct {
@@ -456,7 +456,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
 	}{
 		{
 			name:          "before fulu",
-			fuluForkEpoch: beaconConfig.FarFutureEpoch,
+			fuluForkEpoch: cfg.FarFutureEpoch,
 			topic:         "/eth2/beacon_chain/req/status/1/ssz_snappy",
 			streamHandler: func(service *Service, stream network.Stream, genesisState beaconState.BeaconState, beaconRoot, headRoot, finalizedRoot []byte) {
 				out := &ethpb.Status{}
@@ -17,6 +17,7 @@ import (
 	blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -389,7 +390,7 @@ func (s *Service) waitForChainStart() {
 
 	// Wait for chainstart in separate routine.
 	if startTime.After(prysmTime.Now()) {
-		time.Sleep(prysmTime.Until(startTime))
+		helpers.Sleep(s.ctx, prysmTime.Until(startTime))
 	}
 	log.WithField("startTime", startTime).Debug("Chain started in sync service")
 	s.markForChainStart()
@@ -695,10 +695,10 @@ func (s *Service) dataColumnSubnetIndices(primitives.Slot) map[uint64]bool {
 // the validators custody requirement, and whether the node is subscribed to all data subnets.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
 func (s *Service) samplingSize() (uint64, error) {
-	beaconConfig := params.BeaconConfig()
+	cfg := params.BeaconConfig()
 
 	if flags.Get().SubscribeAllDataSubnets {
-		return beaconConfig.DataColumnSidecarSubnetCount, nil
+		return cfg.DataColumnSidecarSubnetCount, nil
 	}
 
 	// Compute the validators custody requirement.
@@ -712,7 +712,7 @@ func (s *Service) samplingSize() (uint64, error) {
 		return 0, errors.Wrap(err, "custody group count")
 	}
 
-	return max(beaconConfig.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
+	return max(cfg.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
 }
 
 func (s *Service) persistentAndAggregatorSubnetIndices(currentSlot primitives.Slot) map[uint64]bool {
@@ -209,7 +209,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
 			return nil, ctx.Err()
 		}
 
-		time.Sleep(delay)
+		helpers.Sleep(ctx, delay)
 		continue
 	}
@@ -21,6 +21,7 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v6/testing/endtoend/components",
     visibility = ["//testing/endtoend:__subpackages__"],
     deps = [
+        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/state:go_default_library",
         "//cmd:go_default_library",
         "//cmd/beacon-chain/flags:go_default_library",
@@ -15,6 +15,7 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v6/testing/endtoend/components/eth1",
     visibility = ["//testing/endtoend:__subpackages__"],
     deps = [
+        "//beacon-chain/core/helpers:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//contracts/deposit:go_default_library",
@@ -5,6 +5,7 @@ import (
 	"math/big"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/testing/endtoend/params"
 	e2etypes "github.com/OffchainLabs/prysm/v6/testing/endtoend/types"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -62,7 +63,7 @@ func WaitForBlocks(ctx context.Context, web3 *ethclient.Client, key *keystore.Ke
 			return err
 		}
 		nonce++
-		time.Sleep(timeGapPerMiningTX)
+		helpers.Sleep(ctx, timeGapPerMiningTX)
 		block, err = web3.BlockByNumber(ctx, nil)
 		if err != nil {
 			return err
@@ -11,6 +11,7 @@ import (
 	"syscall"
 	"time"
 
+	coreHelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/testing/endtoend/helpers"
 	e2e "github.com/OffchainLabs/prysm/v6/testing/endtoend/params"
 	"github.com/OffchainLabs/prysm/v6/testing/endtoend/types"
@@ -124,7 +125,7 @@ func (ts *TracingSink) initializeSink(ctx context.Context) {
 			default:
 				// Sleep for 100ms and do nothing while waiting for
 				// cancellation.
-				time.Sleep(100 * time.Millisecond)
+				coreHelpers.Sleep(ctx, 100*time.Millisecond)
 			}
 		}
 	}()
@@ -16,6 +16,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/crypto/bls"
 	"github.com/OffchainLabs/prysm/v6/io/file"
@@ -153,7 +154,7 @@ func (w *Web3RemoteSigner) monitorStart() {
 			close(w.started)
 			return
 		}
-		time.Sleep(time.Second)
+		helpers.Sleep(w.ctx, time.Second)
 	}
 }
@@ -9,6 +9,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/async/event"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher"
@@ -146,7 +147,7 @@ func (s *Simulator) Start() {
 
 	// Wait some time and then send a "chain started" event over a notifier
 	// for slasher to pick up a genesis time.
-	time.Sleep(time.Second)
+	helpers.Sleep(s.ctx, time.Second)
 	s.genesisTime = time.Now()
 	var vr [32]byte
 	if err := s.srvConfig.ClockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
@@ -32,6 +32,7 @@ go_library(
         "//async/event:go_default_library",
         "//beacon-chain/builder:go_default_library",
         "//beacon-chain/core/altair:go_default_library",
+        "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//cache/lru:go_default_library",
         "//cmd:go_default_library",
@@ -12,6 +12,7 @@ go_library(
     deps = [
         "//api/client:go_default_library",
         "//api/client/event:go_default_library",
+        "//beacon-chain/core/helpers:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//config/proposer:go_default_library",
@@ -8,6 +8,7 @@ import (
 
 	api "github.com/OffchainLabs/prysm/v6/api/client"
 	"github.com/OffchainLabs/prysm/v6/api/client/event"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/config/proposer"
@@ -263,7 +264,7 @@ func (*FakeValidator) HasProposerSettings() bool {
 
 // PushProposerSettings for mocking
 func (fv *FakeValidator) PushProposerSettings(ctx context.Context, _ primitives.Slot, _ bool) error {
-	time.Sleep(fv.ProposerSettingWait)
+	helpers.Sleep(ctx, fv.ProposerSettingWait)
 	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
 		log.Error("Deadline exceeded")
 		// can't return error as it will trigger a log.fatal
@@ -4,6 +4,7 @@ import (
 	"context"
 	"time"
 
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
 	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
 	"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -64,7 +65,7 @@ func (v *validator) retryWaitForActivation(ctx context.Context, span octrace.Spa
 	attempts := activationAttempts(ctx)
 	log.WithError(err).WithField("attempts", attempts).Error(message)
 	// Reconnection attempt backoff, up to 60s.
-	time.Sleep(time.Second * time.Duration(min(uint64(attempts), 60)))
+	helpers.Sleep(ctx, time.Second*time.Duration(min(uint64(attempts), 60)))
 	// TODO: refactor this to use the health tracker instead for reattempt
 	return v.WaitForActivation(incrementRetries(ctx))
 }
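The delay here is a linear backoff capped at 60 seconds, now cancellable through the context. A sketch of the schedule in isolation (the function name is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// backoff mirrors the reconnection delay above: the wait grows linearly
// with the attempt count and is capped at 60 seconds.
func backoff(attempts uint64) time.Duration {
	return time.Second * time.Duration(min(attempts, 60))
}

func main() {
	for _, a := range []uint64{1, 5, 60, 300} {
		fmt.Printf("attempt %d -> wait %v\n", a, backoff(a))
	}
}
```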
@@ -14,6 +14,7 @@ go_library(
     ],
     deps = [
         "//async/event:go_default_library",
+        "//beacon-chain/core/helpers:go_default_library",
         "//config/fieldparams:go_default_library",
         "//crypto/bls:go_default_library",
         "//encoding/bytesutil:go_default_library",
@@ -14,6 +14,7 @@ import (
 	"time"
 
 	"github.com/OffchainLabs/prysm/v6/async/event"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v6/crypto/bls"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -177,7 +178,7 @@ func (km *Keymanager) refreshRemoteKeysFromFileChangesWithRetry(ctx context.Cont
 		km.retriesRemaining--
 		log.WithError(err).Debug("Error occurred on key refresh")
 		log.WithFields(logrus.Fields{"path": km.keyFilePath, "retriesRemaining": km.retriesRemaining, "retryDelay": retryDelay}).Warnf("Could not refresh keys. Retrying...")
-		time.Sleep(retryDelay)
+		helpers.Sleep(ctx, retryDelay)
 		return km.refreshRemoteKeysFromFileChangesWithRetry(ctx, retryDelay)
 	}
 	return nil