Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-21 03:08:14 -05:00

Compare commits: hdiff_star...longer-e2e

3 commits:

| Author | SHA1 | Date |
|---|---|---|
|  | 0d9e4aaaab |  |
|  | 361cf6f617 |  |
|  | 055c6eb784 |  |
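Taken together, the compare contains two changes: a rename of the misspelled `AggregrateDueBPS` config field to `AggregateDueBPS` (and of its `AGGREGRATE_DUE_BPS` spec key) across the config, spec endpoint, and validator code, and a more tolerant e2e sync evaluator that waits for nodes to leave optimistic mode once the head matches.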
```diff
@@ -132,7 +132,7 @@ func TestGetSpec(t *testing.T) {
 	config.MinSyncCommitteeParticipants = 71
 	config.ProposerReorgCutoffBPS = primitives.BP(121)
 	config.AttestationDueBPS = primitives.BP(122)
-	config.AggregrateDueBPS = primitives.BP(123)
+	config.AggregateDueBPS = primitives.BP(123)
 	config.ContributionDueBPS = primitives.BP(124)
 	config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
 	config.TerminalBlockHashActivationEpoch = 72
```
```diff
@@ -470,7 +470,7 @@ func TestGetSpec(t *testing.T) {
 		assert.Equal(t, "121", v)
 	case "ATTESTATION_DUE_BPS":
 		assert.Equal(t, "122", v)
-	case "AGGREGRATE_DUE_BPS":
+	case "AGGREGATE_DUE_BPS":
 		assert.Equal(t, "123", v)
 	case "CONTRIBUTION_DUE_BPS":
 		assert.Equal(t, "124", v)
```
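The test follows a sentinel pattern: each BPS field is assigned a unique value (121 through 124) and the spec endpoint is expected to echo that value back under the matching key, so the renamed `AGGREGATE_DUE_BPS` key is exercised end to end.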
changelog/eth2353_fix-aggregate-typo.md (new file, +2)

```diff
@@ -0,0 +1,2 @@
+### Fixed
+- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS.
```
changelog/james-prysm_longer-e2e-sync-evaluator.md (new file, +3)

```diff
@@ -0,0 +1,3 @@
+### Ignored
+
+- Added an optimistic check to the e2e evaluator for the synced head; nodes may be slower to sync post-Fulu.
```
```diff
@@ -88,7 +88,7 @@ type BeaconChainConfig struct {
 	IntervalsPerSlot       uint64        `yaml:"INTERVALS_PER_SLOT"`                    // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec.
 	ProposerReorgCutoffBPS primitives.BP `yaml:"PROPOSER_REORG_CUTOFF_BPS" spec:"true"` // ProposerReorgCutoffBPS defines the proposer reorg deadline in basis points of the slot.
 	AttestationDueBPS      primitives.BP `yaml:"ATTESTATION_DUE_BPS" spec:"true"`       // AttestationDueBPS defines the attestation due time in basis points of the slot.
-	AggregrateDueBPS       primitives.BP `yaml:"AGGREGRATE_DUE_BPS" spec:"true"`        // AggregrateDueBPS defines the aggregate due time in basis points of the slot.
+	AggregateDueBPS        primitives.BP `yaml:"AGGREGATE_DUE_BPS" spec:"true"`         // AggregateDueBPS defines the aggregate due time in basis points of the slot.
 	SyncMessageDueBPS      primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS" spec:"true"`      // SyncMessageDueBPS defines the sync message due time in basis points of the slot.
 	ContributionDueBPS     primitives.BP `yaml:"CONTRIBUTION_DUE_BPS" spec:"true"`      // ContributionDueBPS defines the contribution due time in basis points of the slot.
```
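The BPS fields express deadlines as basis points (1/10000) of the slot. Below is a minimal standalone sketch of that conversion; the names mirror the diff (`BP`, a `SlotComponentDuration`-style helper), but the bodies are assumptions for illustration, not Prysm's implementation.

```go
package main

import (
	"fmt"
	"time"
)

// BP stands in for primitives.BP: a deadline in basis points (1/10000) of a slot.
type BP uint64

// slotComponentDuration converts a basis-points component into a wall-clock
// offset within a slot, as a SlotComponentDuration-style helper plausibly would.
func slotComponentDuration(slot time.Duration, component BP) time.Duration {
	return slot * time.Duration(component) / 10000
}

func main() {
	slot := 12 * time.Second
	fmt.Println(slotComponentDuration(slot, 3333)) // ~4s: one third of the slot
	fmt.Println(slotComponentDuration(slot, 6667)) // ~8s: two thirds of the slot
}
```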
```diff
@@ -243,7 +243,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
 		fmt.Sprintf("MAX_BLOBS_PER_BLOCK: %d", cfg.DeprecatedMaxBlobsPerBlock),
 		fmt.Sprintf("PROPOSER_REORG_CUTOFF_BPS: %d", cfg.ProposerReorgCutoffBPS),
 		fmt.Sprintf("ATTESTATION_DUE_BPS: %d", cfg.AttestationDueBPS),
-		fmt.Sprintf("AGGREGRATE_DUE_BPS: %d", cfg.AggregrateDueBPS),
+		fmt.Sprintf("AGGREGATE_DUE_BPS: %d", cfg.AggregateDueBPS),
 		fmt.Sprintf("SYNC_MESSAGE_DUE_BPS: %d", cfg.SyncMessageDueBPS),
 		fmt.Sprintf("CONTRIBUTION_DUE_BPS: %d", cfg.ContributionDueBPS),
 	}
```
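With the rename, ConfigToYaml now emits the key the spec tests expect, e.g. `AGGREGATE_DUE_BPS: 6667` for the mainnet value, instead of the misspelled `AGGREGRATE_DUE_BPS`.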
```diff
@@ -24,7 +24,6 @@ import (
 // These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
 // IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
 var placeholderFields = []string{
-	"AGGREGATE_DUE_BPS",
 	"AGGREGATE_DUE_BPS_GLOAS",
 	"ATTESTATION_DEADLINE",
 	"ATTESTATION_DUE_BPS_GLOAS",
```
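Removing "AGGREGATE_DUE_BPS" from placeholderFields is what makes the rename land: the key is now mapped to a real struct field, so it must no longer be listed as unused. Below is a hypothetical sketch of the kind of consistency check a config loader test can run; the names and the check itself are assumptions for illustration, not Prysm's actual test.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// BeaconChainConfig is a pared-down stand-in with one spec-tagged field.
type BeaconChainConfig struct {
	AggregateDueBPS uint64 `yaml:"AGGREGATE_DUE_BPS" spec:"true"`
}

// A key listed here while also mapped to a field would be a conflict.
var placeholderFields = []string{"AGGREGATE_DUE_BPS"}

func main() {
	// Collect every yaml key that is mapped to a struct field.
	mapped := map[string]bool{}
	t := reflect.TypeOf(BeaconChainConfig{})
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("yaml")
		mapped[strings.Split(tag, ",")[0]] = true
	}
	// Flag placeholders that are actually consumed by the config.
	for _, p := range placeholderFields {
		if mapped[p] {
			fmt.Printf("%s is both mapped and a placeholder\n", p)
		}
	}
}
```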
```diff
@@ -99,7 +98,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
 	assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
 	assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
 	assert.Equal(t, expected.AttestationDueBPS, actual.AttestationDueBPS, "%s: AttestationDueBPS", name)
-	assert.Equal(t, expected.AggregrateDueBPS, actual.AggregrateDueBPS, "%s: AggregrateDueBPS", name)
+	assert.Equal(t, expected.AggregateDueBPS, actual.AggregateDueBPS, "%s: AggregateDueBPS", name)
 	assert.Equal(t, expected.ContributionDueBPS, actual.ContributionDueBPS, "%s: ContributionDueBPS", name)
 	assert.Equal(t, expected.ProposerReorgCutoffBPS, actual.ProposerReorgCutoffBPS, "%s: ProposerReorgCutoffBPS", name)
 	assert.Equal(t, expected.SyncMessageDueBPS, actual.SyncMessageDueBPS, "%s: SyncMessageDueBPS", name)
```
```diff
@@ -123,7 +123,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
 	// Time-based protocol parameters.
 	ProposerReorgCutoffBPS: primitives.BP(1667),
 	AttestationDueBPS:      primitives.BP(3333),
-	AggregrateDueBPS:       primitives.BP(6667),
+	AggregateDueBPS:        primitives.BP(6667),
 	SyncMessageDueBPS:      primitives.BP(3333),
 	ContributionDueBPS:     primitives.BP(6667),
```
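On mainnet's 12-second slot these values recover the familiar deadlines: 1667 BPS of 12 s ≈ 2.0 s for the proposer reorg cutoff, 3333 BPS ≈ 4.0 s (one third of the slot) for attestations and sync messages, and 6667 BPS ≈ 8.0 s (two thirds) for aggregates and contributions.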
```diff
@@ -289,12 +289,47 @@ func (r *testRunner) waitForMatchingHead(ctx context.Context, timeout time.Durat
 				return errors.Wrap(err, "unexpected error requesting head block root from 'ref' beacon node")
 			}
 			if bytesutil.ToBytes32(cResp.HeadBlockRoot) == bytesutil.ToBytes32(rResp.HeadBlockRoot) {
-				return nil
+				// Head matches, now wait for node to exit optimistic mode.
+				// For Fulu, the execution client may take additional time to sync and verify payloads.
+				// Give extra time (up to 2 minutes) for optimistic status to clear.
+				return r.waitForNonOptimistic(ctx, 2*time.Minute, check)
 			}
 		}
 	}
 }
+
+// waitForNonOptimistic waits for a node to exit optimistic mode, with a bounded timeout.
+// If the timeout is reached, it logs a warning but does not fail - the evaluator will
+// handle optimistic nodes gracefully by skipping finalized/justified checks.
+func (r *testRunner) waitForNonOptimistic(ctx context.Context, timeout time.Duration, conn *grpc.ClientConn) error {
+	start := time.Now()
+	deadline := start.Add(timeout)
+	checkClient := eth.NewBeaconChainClient(conn)
+
+	for time.Now().Before(deadline) {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			cResp, err := checkClient.GetChainHead(ctx, &emptypb.Empty{})
+			if err != nil {
+				// If we can't get chain head, just continue - head already matched
+				time.Sleep(100 * time.Millisecond)
+				continue
+			}
+			if !cResp.OptimisticStatus {
+				log.Infof("Node exited optimistic mode after %s", time.Since(start))
+				return nil
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
+	// Timeout reached but node is still optimistic - this is OK, evaluator handles it
+	log.Warnf("Node still in optimistic mode after %s, continuing anyway (evaluator will handle)", timeout)
+	return nil
+}
 
 func (r *testRunner) testCheckpointSync(ctx context.Context, g *errgroup.Group, i int, conns []*grpc.ClientConn, bnAPI, enr, minerEnr string) error {
 	matchTimeout := 3 * time.Minute
 	ethNode := eth1.NewNode(i, minerEnr)
```
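The design choice here is deliberate: waitForNonOptimistic polls GetChainHead every 100 ms against a bounded deadline and returns nil even on timeout, so a slow-to-verify execution client delays the run by at most two minutes rather than failing it. The evaluator's optimistic-status skip (in the hunks below) is the matching safety net.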
```diff
@@ -129,11 +129,12 @@ func finishedSyncing(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) e
 }
 
 func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
 	headEpochs := make([]primitives.Epoch, len(conns))
+	headSlots := make([]primitives.Slot, len(conns))
 	justifiedRoots := make([][]byte, len(conns))
 	prevJustifiedRoots := make([][]byte, len(conns))
 	finalizedRoots := make([][]byte, len(conns))
 	chainHeads := make([]*eth.ChainHead, len(conns))
+	optimisticStatus := make([]bool, len(conns))
 	g, _ := errgroup.WithContext(context.Background())
 
 	for i, conn := range conns {
```
```diff
@@ -145,11 +146,12 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
 			if err != nil {
 				return errors.Wrapf(err, "connection number=%d", conIdx)
 			}
 			headEpochs[conIdx] = chainHead.HeadEpoch
+			headSlots[conIdx] = chainHead.HeadSlot
 			justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
 			prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
 			finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
 			chainHeads[conIdx] = chainHead
+			optimisticStatus[conIdx] = chainHead.OptimisticStatus
 			return nil
 		})
 	}
```
```diff
@@ -158,14 +160,26 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
 	}
 
 	for i := range conns {
-		if headEpochs[0] != headEpochs[i] {
+		// Allow head slots to differ by at most 2 slots to account for timing
+		// differences when querying nodes and chain advancement during evaluation.
+		slotDiff := headSlots[0] - headSlots[i]
+		if headSlots[i] > headSlots[0] {
+			slotDiff = headSlots[i] - headSlots[0]
+		}
+		if slotDiff > 2 {
 			return fmt.Errorf(
-				"received conflicting head epochs on node %d, expected %d, received %d",
+				"received conflicting head slots on node %d, expected %d (±2), received %d",
 				i,
-				headEpochs[0],
-				headEpochs[i],
+				headSlots[0],
+				headSlots[i],
 			)
 		}
+		// Skip finalized/justified checks for nodes in optimistic mode.
+		// Optimistic nodes haven't verified execution payloads yet, so their
+		// finalized/justified state may lag behind fully verified nodes.
+		if optimisticStatus[i] {
+			continue
+		}
 		if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
 			return fmt.Errorf(
 				"received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",
```
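The two-branch subtraction above is not redundant: primitives.Slot is an unsigned integer type, so a bare `headSlots[0] - headSlots[i]` would wrap around to a huge value whenever node i is ahead. A minimal standalone sketch (`Slot` here is a stand-in type):

```go
package main

import "fmt"

// Slot stands in for primitives.Slot, an unsigned integer type;
// a-b would wrap around when b > a, hence the branch.
type Slot uint64

func absSlotDiff(a, b Slot) Slot {
	if b > a {
		return b - a
	}
	return a - b
}

func main() {
	fmt.Println(absSlotDiff(10, 12)) // 2, no underflow
}
```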
```diff
@@ -207,7 +207,7 @@ func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fiel
 // such that any attestations from this slot have time to reach the beacon node
 // before creating the aggregated attestation.
 func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slot) {
-	v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregrateDueBPS)
+	v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregateDueBPS)
 }
 
 // This returns the signature of validator signing over aggregate and
```
```diff
@@ -257,7 +257,7 @@ func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) {
 	numOfSlots := primitives.Slot(4)
 	slotDuration := params.BeaconConfig().SlotDuration()
 	validator.genesisTime = currentTime.Add(-slotDuration * time.Duration(numOfSlots))
-	timeToSleep := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AggregrateDueBPS)
+	timeToSleep := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AggregateDueBPS)
 
 	twoThirdTime := currentTime.Add(timeToSleep)
 	validator.waitToSlotTwoThirds(t.Context(), numOfSlots)
```
```diff
@@ -51,7 +51,7 @@ func (v *validator) slotComponentSpanName(component primitives.BP) string {
 	switch component {
 	case cfg.AttestationDueBPS:
 		return "validator.waitAttestationWindow"
-	case cfg.AggregrateDueBPS:
+	case cfg.AggregateDueBPS:
 		return "validator.waitAggregateWindow"
 	case cfg.SyncMessageDueBPS:
 		return "validator.waitSyncMessageWindow"
```
```diff
@@ -47,7 +47,7 @@ func TestSlotComponentSpanName(t *testing.T) {
 		},
 		{
 			name:      "aggregate",
-			component: cfg.AggregrateDueBPS,
+			component: cfg.AggregateDueBPS,
 			expected:  "validator.waitAggregateWindow",
 		},
 		{
```