Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-02-03 17:45:09 -05:00

Compare commits: fix-loggin ... deflake-ev

12 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a1c1cc9327 |  |
|  | 013dc17f4c |  |
|  | 7450c117e2 |  |
|  | 80b60d06d5 |  |
|  | e694571474 |  |
|  | ca1f386cff |  |
|  | 6bbc9de081 |  |
|  | e8da68bb0f |  |
|  | 7e33e96605 |  |
|  | 2c4a9bc4ac |  |
|  | 4c32b6a89e |  |
|  | c69ffbec62 |  |
```diff
@@ -85,7 +85,6 @@ go_library(
         "//consensus-types/primitives:go_default_library",
         "//crypto/bls:go_default_library",
         "//encoding/bytesutil:go_default_library",
-        "//io/logs:go_default_library",
         "//math:go_default_library",
         "//monitoring/tracing:go_default_library",
         "//monitoring/tracing/trace:go_default_library",
```
```diff
@@ -10,7 +10,6 @@ import (
 	consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v7/io/logs"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	prysmTime "github.com/OffchainLabs/prysm/v7/time"
```
```diff
@@ -90,39 +89,34 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
 	if err != nil {
 		return err
 	}
-	parentRoot := block.ParentRoot()
-
-	lessFields := logrus.Fields{
-		"slot":           block.Slot(),
-		"block":          fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
-		"finalizedEpoch": finalized.Epoch,
-		"finalizedRoot":  fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
-		"epoch":          slots.ToEpoch(block.Slot()),
-	}
-	moreFields := logrus.Fields{
-		"slot":                       block.Slot(),
-		"slotInEpoch":                block.Slot() % params.BeaconConfig().SlotsPerEpoch,
-		"block":                      fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
-		"epoch":                      slots.ToEpoch(block.Slot()),
-		"justifiedEpoch":             justified.Epoch,
-		"justifiedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
-		"finalizedEpoch":             finalized.Epoch,
-		"finalizedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
-		"parentRoot":                 fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
-		"version":                    version.String(block.Version()),
-		"sinceSlotStartTime":         prysmTime.Now().Sub(startTime),
-		"chainServiceProcessedTime":  prysmTime.Now().Sub(receivedTime) - daWaitedTime,
-		"dataAvailabilityWaitedTime": daWaitedTime,
-	}
-
-	level := logs.PackageVerbosity("beacon-chain/blockchain")
+	level := log.Logger.GetLevel()
 	if level >= logrus.DebugLevel {
-		log.WithFields(moreFields).Info("Synced new block")
+		parentRoot := block.ParentRoot()
+		lf := logrus.Fields{
+			"slot":                       block.Slot(),
+			"slotInEpoch":                block.Slot() % params.BeaconConfig().SlotsPerEpoch,
+			"block":                      fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
+			"epoch":                      slots.ToEpoch(block.Slot()),
+			"justifiedEpoch":             justified.Epoch,
+			"justifiedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
+			"finalizedEpoch":             finalized.Epoch,
+			"finalizedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
+			"parentRoot":                 fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
+			"version":                    version.String(block.Version()),
+			"sinceSlotStartTime":         prysmTime.Now().Sub(startTime),
+			"chainServiceProcessedTime":  prysmTime.Now().Sub(receivedTime) - daWaitedTime,
+			"dataAvailabilityWaitedTime": daWaitedTime,
+		}
+		log.WithFields(lf).Debug("Synced new block")
 	} else {
-		log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
-		log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
+		log.WithFields(logrus.Fields{
+			"slot":           block.Slot(),
+			"block":          fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
+			"finalizedEpoch": finalized.Epoch,
+			"finalizedRoot":  fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
+			"epoch":          slots.ToEpoch(block.Slot()),
+		}).Info("Synced new block")
 	}
 
 	return nil
 }
```
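The net effect of this hunk: the per-package verbosity routing (logs.PackageVerbosity plus the dual user/ephemeral log targets) is backed out, and the verbose field set is gated directly on the logger's effective level. A minimal standalone sketch of that gating pattern using stock logrus (field values are illustrative, not Prysm's):

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()
	log.SetLevel(logrus.InfoLevel) // switch to logrus.DebugLevel to get the verbose entry

	if log.GetLevel() >= logrus.DebugLevel {
		// The detailed field set is only built when debug is enabled.
		log.WithFields(logrus.Fields{
			"slot":       42,
			"epoch":      1,
			"parentRoot": "0xabcd1234...",
		}).Debug("Synced new block")
	} else {
		// Otherwise emit only the cheap summary fields at Info.
		log.WithFields(logrus.Fields{
			"slot":  42,
			"epoch": 1,
		}).Info("Synced new block")
	}
}
```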
```diff
@@ -1,3 +0,0 @@
-### Changed
-
-- Fixed the logging issue described in #16314.
```
changelog/james-prysm_deflake-evaluator.md (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+### Ignored
+
+- adding some short retries for some end to end evaluators in an attempt to deflake tests.
```
```diff
@@ -188,8 +188,8 @@ func before(ctx *cli.Context) error {
 		return errors.Wrap(err, "failed to parse log vmodule")
 	}
 
-	// set the global logging level and data
-	logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
+	// set the global logging level to allow for the highest verbosity requested
+	logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
 
 	format := ctx.String(cmd.LogFormat.Name)
 	switch format {
```
```diff
@@ -210,7 +210,6 @@ func before(ctx *cli.Context) error {
 		Formatter:     formatter,
 		Writer:        os.Stderr,
 		AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
-		Identifier:    logs.LogTargetUser,
 	})
 	case "fluentd":
 		f := joonix.NewFormatter()
```
```diff
@@ -164,8 +164,8 @@ func main() {
 		return errors.Wrap(err, "failed to parse log vmodule")
 	}
 
-	// set the global logging level and data
-	logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
+	// set the global logging level to allow for the highest verbosity requested
+	logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
 
 	logFileName := ctx.String(cmd.LogFileName.Name)
 
```
```diff
@@ -188,7 +188,6 @@ func main() {
 		Formatter:     formatter,
 		Writer:        os.Stderr,
 		AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
-		Identifier:    logs.LogTargetUser,
 	})
 	case "fluentd":
 		f := joonix.NewFormatter()
```
```diff
@@ -1,7 +1,6 @@
 package logs
 
 import (
-	"fmt"
 	"io"
 
 	"github.com/sirupsen/logrus"
```
```diff
@@ -11,7 +10,6 @@ type WriterHook struct {
 	AllowedLevels []logrus.Level
 	Writer        io.Writer
 	Formatter     logrus.Formatter
-	Identifier    string
 }
 
 func (hook *WriterHook) Levels() []logrus.Level {
```
```diff
@@ -22,11 +20,6 @@ func (hook *WriterHook) Levels() []logrus.Level {
 }
 
 func (hook *WriterHook) Fire(entry *logrus.Entry) error {
-	val, ok := entry.Data[LogTargetField]
-	if ok && fmt.Sprint(val) != hook.Identifier {
-		return nil
-	}
-
 	line, err := hook.Formatter.Format(entry)
 	if err != nil {
 		return err
```
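With the Identifier field and the log_target check gone, WriterHook filters purely on AllowedLevels. A self-contained sketch of that hook shape against stock logrus (the type name levelHook is mine, not the package's):

```go
package main

import (
	"io"
	"os"

	"github.com/sirupsen/logrus"
)

// levelHook mirrors the post-change WriterHook: forward any entry whose
// level appears in AllowedLevels to Writer, serialized by Formatter.
type levelHook struct {
	AllowedLevels []logrus.Level
	Writer        io.Writer
	Formatter     logrus.Formatter
}

func (h *levelHook) Levels() []logrus.Level { return h.AllowedLevels }

func (h *levelHook) Fire(entry *logrus.Entry) error {
	line, err := h.Formatter.Format(entry)
	if err != nil {
		return err
	}
	_, err = h.Writer.Write(line)
	return err
}

func main() {
	logger := logrus.New()
	logger.SetOutput(io.Discard) // the hook, not the logger, writes output
	logger.AddHook(&levelHook{
		// logrus.AllLevels[:logrus.InfoLevel+1] is Panic through Info,
		// the same slicing idiom the diff uses.
		AllowedLevels: logrus.AllLevels[:logrus.InfoLevel+1],
		Writer:        os.Stderr,
		Formatter:     &logrus.TextFormatter{},
	})
	logger.Info("forwarded by the hook")
	logger.Debug("dropped: below the logger level and outside AllowedLevels")
}
```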
```diff
@@ -17,43 +17,11 @@ import (
 	"gopkg.in/natefinch/lumberjack.v2"
 )
 
-var (
-	userVerbosity = logrus.InfoLevel
-	vmodule       = make(map[string]logrus.Level)
-)
+var ephemeralLogFileVerbosity = logrus.DebugLevel
 
-const (
-	ephemeralLogFileVerbosity = logrus.DebugLevel
-	LogTargetField            = "log_target"
-	LogTargetEphemeral        = "ephemeral"
-	LogTargetUser             = "user"
-)
-
-// SetLoggingLevelAndData sets the base logging level for logrus.
-func SetLoggingLevelAndData(baseVerbosity logrus.Level, vmoduleMap map[string]logrus.Level, maxVmoduleLevel logrus.Level, disableEphemeral bool) {
-	userVerbosity = baseVerbosity
-	vmodule = vmoduleMap
-
-	globalLevel := max(baseVerbosity, maxVmoduleLevel)
-	if !disableEphemeral {
-		globalLevel = max(globalLevel, ephemeralLogFileVerbosity)
-	}
-	logrus.SetLevel(globalLevel)
-}
-
-// PackageVerbosity returns the verbosity of a given package.
-func PackageVerbosity(packagePath string) logrus.Level {
-	bestLen := 0
-	bestLevel := userVerbosity
-	for k, v := range vmodule {
-		if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
-			if len(k) > bestLen {
-				bestLen = len(k)
-				bestLevel = v
-			}
-		}
-	}
-	return bestLevel
+// SetLoggingLevel sets the base logging level for logrus.
+func SetLoggingLevel(lvl logrus.Level) {
+	logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
 }
 
 func addLogWriter(w io.Writer) {
```
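A subtlety behind max(lvl, ephemeralLogFileVerbosity): logrus levels grow more verbose as they grow numerically (PanicLevel is 0, TraceLevel is 6), so max picks the chattier level and the global filter always admits everything the debug-level ephemeral file wants. A quick check:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	user := logrus.InfoLevel       // 4
	ephemeral := logrus.DebugLevel // 5
	// The global logrus level must be at least as verbose as the most
	// verbose consumer, or the ephemeral file would never see debug lines.
	fmt.Println(max(user, ephemeral)) // prints "debug" (builtin max needs Go 1.21+)
}
```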
```diff
@@ -100,7 +68,6 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
 		Formatter:     formatter,
 		Writer:        f,
 		AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
-		Identifier:    LogTargetUser,
 	})
 
 	logrus.Debug("File logging initialized")
```
```diff
@@ -134,7 +101,6 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
 		Formatter:     formatter,
 		Writer:        debugWriter,
 		AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
-		Identifier:    LogTargetEphemeral,
 	})
 
 	logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")
```
```diff
@@ -334,7 +334,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys
 		_, err = fmt.Fprintf(b, "%s %s%s "+messageFormat, colorScheme.TimestampColor(timestamp), level, prefix, message)
 	}
 	for _, k := range keys {
-		if k != "package" && k != "log_target" {
+		if k != "package" {
 			v := entry.Data[k]
 
 			format := "%+v"
```
```diff
@@ -40,6 +40,7 @@ type TransactionGenerator struct {
 	cancel        context.CancelFunc
 	paused        bool
 	useLargeBlobs bool // Use large blob transactions (6 blobs per tx) for BPO testing
+	blobTxCount   int  // Number of blob transactions per slot (0 means default of 5)
 }
 
 func (t *TransactionGenerator) UnderlyingProcess() *os.Process {
```
```diff
@@ -48,8 +49,8 @@ func (t *TransactionGenerator) UnderlyingProcess() *os.Process {
 	return &os.Process{}
 }
 
-func NewTransactionGenerator(keystore string, seed int64, useLargeBlobs bool) *TransactionGenerator {
-	return &TransactionGenerator{keystore: keystore, seed: seed, useLargeBlobs: useLargeBlobs}
+func NewTransactionGenerator(keystore string, seed int64, useLargeBlobs bool, blobTxCount int) *TransactionGenerator {
+	return &TransactionGenerator{keystore: keystore, seed: seed, useLargeBlobs: useLargeBlobs, blobTxCount: blobTxCount}
 }
 
 func (t *TransactionGenerator) Start(ctx context.Context) error {
```
```diff
@@ -114,7 +115,7 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
 			continue
 		}
 		backend := ethclient.NewClient(client)
-		err = SendTransaction(client, mineKey.PrivateKey, gasPrice, mineKey.Address.String(), txCount, backend, false, t.useLargeBlobs)
+		err = SendTransaction(client, mineKey.PrivateKey, gasPrice, mineKey.Address.String(), txCount, backend, false, t.useLargeBlobs, t.blobTxCount)
 		if err != nil {
 			return err
 		}
```
```diff
@@ -128,7 +129,7 @@ func (s *TransactionGenerator) Started() <-chan struct{} {
 	return s.started
 }
 
-func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, gasPrice *big.Int, addr string, txCount uint64, backend *ethclient.Client, al bool, useLargeBlobs bool) error {
+func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, gasPrice *big.Int, addr string, txCount uint64, backend *ethclient.Client, al bool, useLargeBlobs bool, blobTxCount int) error {
 	sender := common.HexToAddress(addr)
 	nonce, err := backend.PendingNonceAt(context.Background(), fundedAccount.Address)
 	if err != nil {
```
```diff
@@ -150,14 +151,19 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, gasPrice *big.In
 	clock := startup.NewClock(e2e.TestParams.CLGenesisTime, [32]byte{})
 	isPostFulu := clock.CurrentEpoch() >= params.BeaconConfig().FuluForkEpoch
 
+	// Default to 5 blob transactions per slot if not configured.
+	numBlobTxs := blobTxCount
+	if numBlobTxs <= 0 {
+		numBlobTxs = 5
+	}
+
 	g, _ := errgroup.WithContext(context.Background())
-	txs := make([]*types.Transaction, 10)
+	txs := make([]*types.Transaction, numBlobTxs)
 
 	// Send blob transactions - use different versions pre/post Fulu
 	if isPostFulu {
 		logrus.Info("Sending blob transactions with cell proofs")
-		// Reduced from 10 to 5 to reduce load and prevent builder/EL timeouts
-		for index := range uint64(5) {
+		for index := range uint64(numBlobTxs) {
 
 			g.Go(func() error {
 				tx, err := RandomBlobCellTx(client, fundedAccount.Address, nonce+index, gasPrice, chainid, al, useLargeBlobs)
```
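For context, the send path sizes the txs slice from numBlobTxs and fans the sends out via errgroup, whose Wait returns the first non-nil error. A reduced sketch of that fan-out with a stand-in for the actual send call (names here are illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	blobTxCount := 0 // pretend WithBlobTxCount was not used
	numBlobTxs := blobTxCount
	if numBlobTxs <= 0 {
		numBlobTxs = 5 // same zero-means-default rule as the diff
	}

	g, _ := errgroup.WithContext(context.Background())
	txs := make([]string, numBlobTxs) // one slot per transaction

	for index := range numBlobTxs { // Go 1.22+ integer range, as in the diff
		g.Go(func() error {
			// Each goroutine writes only its own index, so no locking is needed.
			txs[index] = fmt.Sprintf("blob-tx-%d", index)
			return nil
		})
	}
	if err := g.Wait(); err != nil { // first error from any send, if any
		panic(err)
	}
	fmt.Println(txs)
}
```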
```diff
@@ -176,8 +182,7 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, gasPrice *big.In
 		}
 	} else {
 		logrus.Info("Sending blob transactions with sidecars")
-		// Reduced from 10 to 5 to reduce load and prevent builder/EL timeouts
-		for index := range uint64(5) {
+		for index := range uint64(numBlobTxs) {
 
 			g.Go(func() error {
 				tx, err := RandomBlobTx(client, fundedAccount.Address, nonce+index, gasPrice, chainid, al, useLargeBlobs)
```
```diff
@@ -252,7 +252,7 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
 }
 
 func (r *testRunner) testTxGeneration(ctx context.Context, g *errgroup.Group, keystorePath string, requiredNodes []e2etypes.ComponentRunner) {
-	txGenerator := eth1.NewTransactionGenerator(keystorePath, r.config.Seed, r.config.UseLargeBlobs)
+	txGenerator := eth1.NewTransactionGenerator(keystorePath, r.config.Seed, r.config.UseLargeBlobs, r.config.BlobTxCount)
 	r.comHandler.txGen = txGenerator
 	g.Go(func() error {
 		if err := helpers.ComponentsStarted(ctx, requiredNodes); err != nil {
```
```diff
@@ -156,19 +156,9 @@ func waitForMidEpoch(conn *grpc.ClientConn) error {
 	}
 }
 
-func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
-	// Wait until we're at least halfway into the epoch to avoid race conditions
-	// at epoch boundaries where nodes may report different epochs.
-	if err := waitForMidEpoch(conns[0]); err != nil {
-		return errors.Wrap(err, "failed waiting for mid-epoch")
-	}
-
-	headEpochs := make([]primitives.Epoch, len(conns))
-	headBlockRoots := make([][]byte, len(conns))
-	justifiedRoots := make([][]byte, len(conns))
-	prevJustifiedRoots := make([][]byte, len(conns))
-	finalizedRoots := make([][]byte, len(conns))
-	chainHeads := make([]*eth.ChainHead, len(conns))
+// getHeadEpochs fetches the head epoch from all beacon nodes concurrently.
+func getHeadEpochs(conns []*grpc.ClientConn) ([]primitives.Epoch, error) {
+	epochs := make([]primitives.Epoch, len(conns))
 	g, _ := errgroup.WithContext(context.Background())
 
 	for i, conn := range conns {
```
```diff
@@ -180,63 +170,145 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
 			if err != nil {
 				return errors.Wrapf(err, "connection number=%d", conIdx)
 			}
-			headEpochs[conIdx] = chainHead.HeadEpoch
-			headBlockRoots[conIdx] = chainHead.HeadBlockRoot
-			justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
-			prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
-			finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
-			chainHeads[conIdx] = chainHead
+			epochs[conIdx] = chainHead.HeadEpoch
 			return nil
 		})
 	}
 	if err := g.Wait(); err != nil {
-		return err
+		return nil, err
 	}
 
-	for i := range conns {
-		if headEpochs[0] != headEpochs[i] {
-			return fmt.Errorf(
-				"received conflicting head epochs on node %d, expected %d, received %d",
-				i,
-				headEpochs[0],
-				headEpochs[i],
-			)
-		}
-		if !bytes.Equal(headBlockRoots[0], headBlockRoots[i]) {
-			return fmt.Errorf(
-				"received conflicting head block roots on node %d, expected %#x, received %#x",
-				i,
-				headBlockRoots[0],
-				headBlockRoots[i],
-			)
-		}
-		if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
-			return fmt.Errorf(
-				"received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",
-				i,
-				justifiedRoots[0],
-				justifiedRoots[i],
-				chainHeads[0].String(),
-				chainHeads[i].String(),
-			)
-		}
-		if !bytes.Equal(prevJustifiedRoots[0], prevJustifiedRoots[i]) {
-			return fmt.Errorf(
-				"received conflicting previous justified block roots on node %d, expected %#x, received %#x",
-				i,
-				prevJustifiedRoots[0],
-				prevJustifiedRoots[i],
-			)
-		}
-		if !bytes.Equal(finalizedRoots[0], finalizedRoots[i]) {
-			return fmt.Errorf(
-				"received conflicting finalized epoch roots on node %d, expected %#x, received %#x",
-				i,
-				finalizedRoots[0],
-				finalizedRoots[i],
-			)
-		}
-	}
-
-	return nil
+	return epochs, nil
 }
+
+func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
+	// Wait until we're at least halfway into the epoch to avoid race conditions
+	// at epoch boundaries where nodes may report different epochs.
+	if err := waitForMidEpoch(conns[0]); err != nil {
+		return errors.Wrap(err, "failed waiting for mid-epoch")
+	}
+
+	// First, wait for all nodes to reach the same epoch. Sync nodes may be
+	// behind and need time to catch up. We poll every 2 seconds with a
+	// 60 second timeout - this adapts to actual sync progress rather than
+	// using fixed delays.
+	const epochTimeout = 60 * time.Second
+	const epochPollInterval = 2 * time.Second
+	epochDeadline := time.Now().Add(epochTimeout)
+
+	for time.Now().Before(epochDeadline) {
+		epochs, err := getHeadEpochs(conns)
+		if err != nil {
+			return err
+		}
+		allSame := true
+		for i := 1; i < len(epochs); i++ {
+			if epochs[0] != epochs[i] {
+				allSame = false
+				break
+			}
+		}
+		if allSame {
+			break
+		}
+		time.Sleep(epochPollInterval)
+	}
+
+	// Now that epochs match (or timeout reached), do detailed head comparison
+	// with a few retries to handle block propagation delays.
+	const maxRetries = 5
+	const retryDelay = 1 * time.Second
+	var lastErr error
+
+	for attempt := range maxRetries {
+		if attempt > 0 {
+			time.Sleep(retryDelay)
+		}
+
+		headEpochs := make([]primitives.Epoch, len(conns))
+		headBlockRoots := make([][]byte, len(conns))
+		justifiedRoots := make([][]byte, len(conns))
+		prevJustifiedRoots := make([][]byte, len(conns))
+		finalizedRoots := make([][]byte, len(conns))
+		chainHeads := make([]*eth.ChainHead, len(conns))
+		g, _ := errgroup.WithContext(context.Background())
+
+		for i, conn := range conns {
+			conIdx := i
+			currConn := conn
+			g.Go(func() error {
+				beaconClient := eth.NewBeaconChainClient(currConn)
+				chainHead, err := beaconClient.GetChainHead(context.Background(), &emptypb.Empty{})
+				if err != nil {
+					return errors.Wrapf(err, "connection number=%d", conIdx)
+				}
+				headEpochs[conIdx] = chainHead.HeadEpoch
+				headBlockRoots[conIdx] = chainHead.HeadBlockRoot
+				justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
+				prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
+				finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
+				chainHeads[conIdx] = chainHead
+				return nil
+			})
+		}
+		if err := g.Wait(); err != nil {
+			return err
+		}
+
+		lastErr = nil
+		for i := range conns {
+			if headEpochs[0] != headEpochs[i] {
+				lastErr = fmt.Errorf(
+					"received conflicting head epochs on node %d, expected %d, received %d",
+					i,
+					headEpochs[0],
+					headEpochs[i],
+				)
+				break
+			}
+			if !bytes.Equal(headBlockRoots[0], headBlockRoots[i]) {
+				lastErr = fmt.Errorf(
+					"received conflicting head block roots on node %d, expected %#x, received %#x",
+					i,
+					headBlockRoots[0],
+					headBlockRoots[i],
+				)
+				break
+			}
+			if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
+				lastErr = fmt.Errorf(
+					"received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",
+					i,
+					justifiedRoots[0],
+					justifiedRoots[i],
+					chainHeads[0].String(),
+					chainHeads[i].String(),
+				)
+				break
+			}
+			if !bytes.Equal(prevJustifiedRoots[0], prevJustifiedRoots[i]) {
+				lastErr = fmt.Errorf(
+					"received conflicting previous justified block roots on node %d, expected %#x, received %#x",
+					i,
+					prevJustifiedRoots[0],
+					prevJustifiedRoots[i],
+				)
+				break
+			}
+			if !bytes.Equal(finalizedRoots[0], finalizedRoots[i]) {
+				lastErr = fmt.Errorf(
+					"received conflicting finalized epoch roots on node %d, expected %#x, received %#x",
+					i,
+					finalizedRoots[0],
+					finalizedRoots[i],
+				)
+				break
+			}
+		}
+
+		if lastErr == nil {
+			return nil
+		}
+	}
+
+	return lastErr
+}
```
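The rewritten evaluator is two-phase: first poll a cheap condition (identical head epochs) under a deadline, then retry the full five-way comparison a bounded number of times. The poll phase generalizes to a small helper; a hedged sketch (pollUntil is my name, not the PR's):

```go
package main

import (
	"fmt"
	"time"
)

// pollUntil re-evaluates cond every interval until it reports true or the
// deadline passes. Like the evaluator, a timeout is not an error here: the
// caller's stricter follow-up check is what ultimately fails the test.
func pollUntil(timeout, interval time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ok, err := cond()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	start := time.Now()
	_ = pollUntil(60*time.Second, 2*time.Second, func() (bool, error) {
		// Stand-in for "all nodes report the same head epoch".
		return time.Since(start) > 3*time.Second, nil
	})
	fmt.Println("proceeding to the detailed comparison after", time.Since(start).Round(time.Second))
}
```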
```diff
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net/http"
 	"strconv"
+	"time"
 
 	"github.com/OffchainLabs/prysm/v7/api/server/structs"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
```
```diff
@@ -123,6 +124,25 @@ func validatorsAreActive(ec *types.EvaluationContext, conns ...*grpc.ClientConn)
 
 // validatorsParticipating ensures the validators have an acceptable participation rate.
 func validatorsParticipating(_ *types.EvaluationContext, conns ...*grpc.ClientConn) error {
+	// Retry up to 3 times with 2 second delays to handle timing flakes where
+	// attestations haven't been fully processed yet due to block propagation delays.
+	const maxRetries = 3
+	const retryDelay = 2 * time.Second
+	var lastErr error
+
+	for attempt := range maxRetries {
+		if attempt > 0 {
+			time.Sleep(retryDelay)
+		}
+		lastErr = checkValidatorsParticipating(conns)
+		if lastErr == nil {
+			return nil
+		}
+	}
+	return lastErr
+}
+
+func checkValidatorsParticipating(conns []*grpc.ClientConn) error {
 	conn := conns[0]
 	client := ethpb.NewBeaconChainClient(conn)
 	validatorRequest := &ethpb.GetValidatorParticipationRequest{}
```
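Both participation evaluators now share the same shape: run the original check, and on failure sleep and retry up to maxRetries, surfacing the last error. That shape factors into a generic helper; a sketch under the same Go 1.22+ assumption the diff makes (retry is not a function in the PR):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs check up to attempts times, sleeping delay between tries.
// It returns nil on the first success, otherwise the last error seen.
func retry(attempts int, delay time.Duration, check func() error) error {
	var lastErr error
	for attempt := range attempts {
		if attempt > 0 {
			time.Sleep(delay)
		}
		if lastErr = check(); lastErr == nil {
			return nil
		}
	}
	return lastErr
}

func main() {
	calls := 0
	err := retry(3, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("participation below threshold")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```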
```diff
@@ -234,6 +254,25 @@ func validatorsParticipating(_ *types.EvaluationContext, conns ...*grpc.ClientCo
 // validatorsSyncParticipation ensures the validators have an acceptable participation rate for
 // sync committee assignments.
 func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.ClientConn) error {
+	// Retry up to 3 times with 2 second delays to handle timing flakes where
+	// sync committee messages haven't fully propagated yet.
+	const maxRetries = 3
+	const retryDelay = 2 * time.Second
+	var lastErr error
+
+	for attempt := range maxRetries {
+		if attempt > 0 {
+			time.Sleep(retryDelay)
+		}
+		lastErr = checkSyncParticipation(conns)
+		if lastErr == nil {
+			return nil
+		}
+	}
+	return lastErr
+}
+
+func checkSyncParticipation(conns []*grpc.ClientConn) error {
 	conn := conns[0]
 	client := ethpb.NewNodeClient(conn)
 	altairClient := ethpb.NewBeaconChainClient(conn)
```
```diff
@@ -272,9 +311,9 @@ func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.Clie
 			// Skip fork slot.
 			continue
 		}
-		// Skip slots 1-2 at genesis - validators need time to ramp up after chain start
+		// Skip early slots at genesis - validators need time to ramp up after chain start
 		// due to doppelganger protection. This is a startup timing issue, not a fork transition issue.
-		if b.Block().Slot() < 3 {
+		if b.Block().Slot() < 5 {
 			continue
 		}
 		expectedParticipation := expectedSyncParticipation
```
```diff
@@ -289,6 +328,11 @@ func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.Clie
 		if err != nil {
 			return err
 		}
+		// Skip blocks with zero sync bits - these are typically empty/anomalous blocks
+		// where the proposer didn't receive sync committee contributions in time.
+		if syncAgg.SyncCommitteeBits.Count() == 0 {
+			continue
+		}
 		threshold := uint64(float64(syncAgg.SyncCommitteeBits.Len()) * expectedParticipation)
 		if syncAgg.SyncCommitteeBits.Count() < threshold {
 			return errors.Errorf("In block of slot %d ,the aggregate bitvector with length of %d only got a count of %d", b.Block().Slot(), threshold, syncAgg.SyncCommitteeBits.Count())
```
```diff
@@ -343,6 +387,11 @@ func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.Clie
 		if err != nil {
 			return err
 		}
+		// Skip blocks with zero sync bits - these are typically empty/anomalous blocks
+		// where the proposer didn't receive sync committee contributions in time.
+		if syncAgg.SyncCommitteeBits.Count() == 0 {
+			continue
+		}
 		threshold := uint64(float64(syncAgg.SyncCommitteeBits.Len()) * expectedSyncParticipation)
 		if syncAgg.SyncCommitteeBits.Count() < threshold {
 			return errors.Errorf("In block of slot %d ,the aggregate bitvector with length of %d only got a count of %d", b.Block().Slot(), threshold, syncAgg.SyncCommitteeBits.Count())
```
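The threshold arithmetic in both branches is the same: take the bitvector length, scale by the expected participation rate, and fail only if the count of set bits falls below that, now skipping all-zero bitvectors first. A worked example with assumed numbers (the real committee size and rate come from the E2E config, not these values):

```go
package main

import "fmt"

func main() {
	const expectedParticipation = 0.90 // assumed rate; not the actual E2E constant
	bitsLen := uint64(512)             // assumed committee size for illustration
	count := uint64(480)               // set bits observed in the block's aggregate

	if count == 0 {
		// Mirrors the new guard: empty/anomalous blocks are skipped, not failed.
		fmt.Println("skip block with zero sync bits")
		return
	}
	threshold := uint64(float64(bitsLen) * expectedParticipation) // 460
	fmt.Printf("count=%d threshold=%d pass=%t\n", count, threshold, count >= threshold)
}
```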
```diff
@@ -9,11 +9,11 @@ import (
 )
 
 func TestEndToEnd_MinimalConfig_WithBuilder(t *testing.T) {
-	r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithBuilder())
+	r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithBuilder(), types.WithBlobTxCount(2))
 	r.run()
 }
 
 func TestEndToEnd_MinimalConfig_WithBuilder_ValidatorRESTApi(t *testing.T) {
-	r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithBuilder(), types.WithValidatorRESTApi())
+	r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithBuilder(), types.WithValidatorRESTApi(), types.WithBlobTxCount(2))
 	r.run()
 }
```
```diff
@@ -68,6 +68,14 @@ func WithLargeBlobs() E2EConfigOpt {
 	}
 }
 
+// WithBlobTxCount sets the number of blob transactions sent per slot.
+// Default is 5 when not specified.
+func WithBlobTxCount(n int) E2EConfigOpt {
+	return func(cfg *E2EConfig) {
+		cfg.BlobTxCount = n
+	}
+}
+
 func WithSSZOnly() E2EConfigOpt {
 	return func(cfg *E2EConfig) {
 		if err := os.Setenv(params.EnvNameOverrideAccept, api.OctetStreamMediaType); err != nil {
```
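WithBlobTxCount slots into the functional-options pattern the E2E harness already uses: each E2EConfigOpt is a closure that mutates the config, and options are applied in call order. A stripped-down sketch of the pattern (types renamed; not the harness's actual code):

```go
package main

import "fmt"

type config struct {
	blobTxCount int
	useBuilder  bool
}

type opt func(*config)

func withBlobTxCount(n int) opt { return func(c *config) { c.blobTxCount = n } }
func withBuilder() opt          { return func(c *config) { c.useBuilder = true } }

func newConfig(opts ...opt) *config {
	c := &config{} // zero blobTxCount means "use the default of 5" downstream
	for _, o := range opts {
		o(c) // later options win over earlier ones
	}
	return c
}

func main() {
	c := newConfig(withBuilder(), withBlobTxCount(2))
	fmt.Printf("%+v\n", *c) // {blobTxCount:2 useBuilder:true}
}
```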
```diff
@@ -108,6 +116,7 @@ type E2EConfig struct {
 	UseBeaconRestApi bool
 	UseBuilder       bool
 	UseLargeBlobs    bool // Use large blob transactions (6 blobs per tx) for BPO testing
+	BlobTxCount      int  // Number of blob transactions per slot (0 means default of 5)
 	EpochsToRun      uint64
 	ExitEpoch        primitives.Epoch // Custom epoch for voluntary exit submission (0 means use default)
 	Seed             int64
```