Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)
Compare commits: 3e7cd8c2f1...kzg-verif-

4 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | c4eeb043a4 |  |
|  | 1a6252ade4 |  |
|  | 27c009e7ff |  |
|  | ffad861e2c |  |
@@ -17,6 +17,7 @@ go_library(
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@org_golang_x_sync//errgroup:go_default_library",
     ],
 )
@@ -1,7 +1,10 @@
 package kzg

 import (
+	"runtime"
+
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"

 	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
 	"github.com/ethereum/go-ethereum/crypto/kzg4844"
@@ -25,6 +28,9 @@ type Cell [BytesPerCell]byte
 // Commitment represent a KZG commitment to a Blob.
 type Commitment [48]byte

+// errInvalidProof is returned when KZG proof verification fails.
+var errInvalidProof = errors.New("invalid KZG proof")
+
 // Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
 type Proof [BytesPerProof]byte
@@ -103,16 +109,69 @@ func ComputeCellsAndKZGProofs(blob *Blob) ([]Cell, []Proof, error) {
 	return cells, proofs, nil
 }

-// VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cells indices, cells and proofs.
-// Note: It is way more efficient to call once this function with big slices than calling it multiple times with small slices.
-func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
-	// Convert `Cell` type to `ckzg4844.Cell`
-	ckzgCells := make([]ckzg4844.Cell, len(cells))
-	for i := range cells {
-		copy(ckzgCells[i][:], cells[i][:])
-	}
-	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
-}
+// chunkBounds represents the start and end indices of a chunk.
+type chunkBounds struct {
+	start, end int
+}
+
+// VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cells indices, cells and proofs.
+// The verification is parallelized across CPU cores by splitting the input into chunks.
+func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
+	count := len(cells)
+
+	// Validate all input slices have the same length
+	if len(commitmentsBytes) != count || len(cellIndices) != count || len(proofsBytes) != count {
+		return false, errors.New("input slices must have equal length")
+	}
+
+	// Convert `Cell` type to `ckzg4844.Cell`
+	ckzgCells := make([]ckzg4844.Cell, count)
+	for i := range cells {
+		copy(ckzgCells[i][:], cells[i][:])
+	}
+
+	if count == 0 {
+		return true, nil
+	}
+
+	workerCount := min(count, runtime.GOMAXPROCS(0))
+	chunks := computeChunkBounds(count, workerCount)
+
+	var wg errgroup.Group
+	for workerIdx := range workerCount {
+		bounds := chunks[workerIdx]
+
+		wg.Go(func() error {
+			// Verify this chunk
+			valid, err := ckzg4844.VerifyCellKZGProofBatch(
+				commitmentsBytes[bounds.start:bounds.end],
+				cellIndices[bounds.start:bounds.end],
+				ckzgCells[bounds.start:bounds.end],
+				proofsBytes[bounds.start:bounds.end],
+			)
+			if err != nil {
+				return err
+			}
+
+			if !valid {
+				return errInvalidProof
+			}
+
+			return nil
+		})
+	}
+
+	// Wait for all workers to complete
+	if err := wg.Wait(); err != nil {
+		if errors.Is(err, errInvalidProof) {
+			return false, nil
+		}
+
+		return false, err
+	}
+
+	return true, nil
+}

 // RecoverCells recovers the complete cells from a given set of cell indices and partial cells.
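The removed note's advice still holds after this change: one call with large slices beats many calls with small ones, and the rewrite now also fans a single large batch out across up to runtime.GOMAXPROCS(0) workers, mapping an errInvalidProof from any chunk to (false, nil). A hedged caller sketch; the sidecar struct and the kzg import path are illustrative assumptions, not part of this diff:

```go
// A sketch of batching all verification inputs into a single call; the
// sidecar struct and the kzg import path are assumptions for illustration.
package example

import (
	kzg "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg" // assumed path
)

// sidecar is a hypothetical container for one cell's verification inputs.
type sidecar struct {
	Commitment kzg.Bytes48
	Index      uint64
	Cell       kzg.Cell
	Proof      kzg.Bytes48
}

// verifyAll flattens every sidecar into parallel slices and verifies them in
// one batch, so the kzg package can split the work across CPU cores itself.
func verifyAll(scs []sidecar) (bool, error) {
	commitments := make([]kzg.Bytes48, 0, len(scs))
	indices := make([]uint64, 0, len(scs))
	cells := make([]kzg.Cell, 0, len(scs))
	proofs := make([]kzg.Bytes48, 0, len(scs))
	for _, sc := range scs {
		commitments = append(commitments, sc.Commitment)
		indices = append(indices, sc.Index)
		cells = append(cells, sc.Cell)
		proofs = append(proofs, sc.Proof)
	}
	// One large batch beats many small calls; invalid proofs yield (false, nil).
	return kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
}
```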
@@ -164,3 +223,30 @@ func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) ([]Cell

 	return cells, proofs, nil
 }
+
+// computeChunkBounds calculates evenly distributed chunk boundaries for parallel processing.
+// It splits itemsCount into chunks, distributing any remainder across the first chunks.
+func computeChunkBounds(itemsCount, workerCount int) []chunkBounds {
+	actualWorkers := min(itemsCount, workerCount)
+
+	if actualWorkers == 0 {
+		return []chunkBounds{}
+	}
+
+	chunkSize := itemsCount / actualWorkers
+	remainder := itemsCount % actualWorkers
+
+	chunks := make([]chunkBounds, 0, actualWorkers)
+	offset := 0
+	for i := range actualWorkers {
+		size := chunkSize
+		if i < remainder {
+			size++
+		}
+
+		chunks = append(chunks, chunkBounds{start: offset, end: offset + size})
+		offset += size
+	}
+
+	return chunks
+}
@@ -111,6 +111,48 @@ func TestVerifyCellKZGProofBatch(t *testing.T) {
 		require.NotNil(t, err)
 		require.Equal(t, false, valid)
 	})
+
+	t.Run("empty inputs should return true", func(t *testing.T) {
+		// Empty slices should be considered valid
+		commitmentsBytes := []Bytes48{}
+		cellIndices := []uint64{}
+		cells := []Cell{}
+		proofsBytes := []Bytes48{}
+
+		valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, cells, proofsBytes)
+		require.NoError(t, err)
+		require.Equal(t, true, valid)
+	})
+
+	t.Run("mismatched input lengths should fail", func(t *testing.T) {
+		randBlob := random.GetRandBlob(123)
+		var blob Blob
+		copy(blob[:], randBlob[:])
+
+		commitment, err := BlobToKZGCommitment(&blob)
+		require.NoError(t, err)
+
+		cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
+		require.NoError(t, err)
+
+		// Create mismatched length inputs
+		cellIndices := []uint64{0, 1, 2}
+		selectedCells := []Cell{cells[0], cells[1], cells[2]}
+		commitmentsBytes := make([]Bytes48, 3)
+		for i := range commitmentsBytes {
+			copy(commitmentsBytes[i][:], commitment[:])
+		}
+
+		// Only 2 proofs instead of 3
+		proofsBytes := make([]Bytes48, 2)
+		copy(proofsBytes[0][:], proofs[0][:])
+		copy(proofsBytes[1][:], proofs[1][:])
+
+		valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, selectedCells, proofsBytes)
+		require.NotNil(t, err)
+		require.Equal(t, false, valid)
+		require.Equal(t, "input slices must have equal length", err.Error())
+	})
 }

 func TestRecoverCells(t *testing.T) {
@@ -234,3 +276,41 @@ func TestBlobToKZGCommitment(t *testing.T) {
 		require.Equal(t, commitment, commitment2)
 	})
 }
+
+func TestComputeChunkBounds(t *testing.T) {
+	t.Run("evenly divisible items", func(t *testing.T) {
+		chunks := computeChunkBounds(100, 4)
+		require.Equal(t, 4, len(chunks))
+		require.Equal(t, chunkBounds{start: 0, end: 25}, chunks[0])
+		require.Equal(t, chunkBounds{start: 25, end: 50}, chunks[1])
+		require.Equal(t, chunkBounds{start: 50, end: 75}, chunks[2])
+		require.Equal(t, chunkBounds{start: 75, end: 100}, chunks[3])
+	})
+
+	t.Run("items with remainder distributed to first chunks", func(t *testing.T) {
+		chunks := computeChunkBounds(10, 3)
+		require.Equal(t, 3, len(chunks))
+		require.Equal(t, chunkBounds{start: 0, end: 4}, chunks[0]) // gets extra item
+		require.Equal(t, chunkBounds{start: 4, end: 7}, chunks[1]) // gets extra item
+		require.Equal(t, chunkBounds{start: 7, end: 10}, chunks[2]) // normal size
+	})
+
+	t.Run("fewer items than workers returns min(items, workers) chunks", func(t *testing.T) {
+		chunks := computeChunkBounds(3, 5)
+		require.Equal(t, 3, len(chunks)) // Only 3 chunks, not 5
+		require.Equal(t, chunkBounds{start: 0, end: 1}, chunks[0])
+		require.Equal(t, chunkBounds{start: 1, end: 2}, chunks[1])
+		require.Equal(t, chunkBounds{start: 2, end: 3}, chunks[2])
+	})
+
+	t.Run("single worker gets all items", func(t *testing.T) {
+		chunks := computeChunkBounds(100, 1)
+		require.Equal(t, 1, len(chunks))
+		require.Equal(t, chunkBounds{start: 0, end: 100}, chunks[0])
+	})
+
+	t.Run("no items produces no chunks", func(t *testing.T) {
+		chunks := computeChunkBounds(0, 4)
+		require.Equal(t, 0, len(chunks)) // No chunks when no items
+	})
+}
@@ -17,6 +17,7 @@ import (
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	ethpbv1 "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
@@ -130,12 +131,10 @@ func TestService_ReceiveBlock(t *testing.T) {
 				block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
 			},
 			check: func(t *testing.T, s *Service) {
-				// Hacky sleep, should use a better way to be able to resolve the race
-				// between event being sent out and processed.
-				time.Sleep(100 * time.Millisecond)
-				if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
-					t.Errorf("Received %d state notifications, expected at least 1", recvd)
-				}
+				notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
+				require.Eventually(t, func() bool {
+					return len(notifier.ReceivedEvents()) >= 1
+				}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
 			},
 		},
 		{
@@ -222,10 +221,10 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
 		require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
 	})
 	wg.Wait()
-	time.Sleep(100 * time.Millisecond)
-	if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
-		t.Errorf("Received %d state notifications, expected at least 1", recvd)
-	}
+	notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
+	require.Eventually(t, func() bool {
+		return len(notifier.ReceivedEvents()) >= 1
+	}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
 	// Verify fork choice has processed the block. (Genesis block and the new block)
 	assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
 }
@@ -265,10 +264,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
 				block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
 			},
 			check: func(t *testing.T, s *Service) {
-				time.Sleep(100 * time.Millisecond)
-				if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
-					t.Errorf("Received %d state notifications, expected at least 1", recvd)
-				}
+				notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
+				require.Eventually(t, func() bool {
+					return len(notifier.ReceivedEvents()) >= 1
+				}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
 			},
 		},
 	}
@@ -512,8 +511,9 @@ func Test_executePostFinalizationTasks(t *testing.T) {
 		s.cfg.StateNotifier = notifier
 		s.executePostFinalizationTasks(s.ctx, headState)

-		time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
-		require.Equal(t, 1, len(notifier.ReceivedEvents()))
+		require.Eventually(t, func() bool {
+			return len(notifier.ReceivedEvents()) == 1
+		}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
 		e := notifier.ReceivedEvents()[0]
 		assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
 		fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
@@ -552,8 +552,9 @@ func Test_executePostFinalizationTasks(t *testing.T) {
 		s.cfg.StateNotifier = notifier
 		s.executePostFinalizationTasks(s.ctx, headState)

-		time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
-		require.Equal(t, 1, len(notifier.ReceivedEvents()))
+		require.Eventually(t, func() bool {
+			return len(notifier.ReceivedEvents()) == 1
+		}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
 		e := notifier.ReceivedEvents()[0]
 		assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
 		fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
@@ -596,13 +597,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {

 	s.executePostFinalizationTasks(s.ctx, l.AttestedState)

-	// wait for the goroutine to finish processing
-	time.Sleep(1 * time.Second)
-
-	// Check that the light client bootstrap is saved
-	b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
-	require.NoError(t, err)
-	require.NotNil(t, b)
+	// Wait for the light client bootstrap to be saved (runs in goroutine)
+	var b interfaces.LightClientBootstrap
+	require.Eventually(t, func() bool {
+		var err error
+		b, err = s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
+		return err == nil && b != nil
+	}, 5*time.Second, 50*time.Millisecond, "Light client bootstrap was not saved within timeout")

 	btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
 	require.NoError(t, err)
@@ -75,7 +75,6 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
 	p2p := p2pTesting.NewTestP2P(t)
 	lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))

-	timeForGoroutinesToFinish := 20 * time.Microsecond
 	// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
 	l0 := util.NewTestLightClient(t, version.Altair)
 	update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
@@ -87,8 +86,9 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update0, true)
 	require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
-	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
+	require.Eventually(t, func() bool {
+		return p2p.BroadcastCalled.Load()
+	}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update when previous is nil")
 	p2p.BroadcastCalled.Store(false) // Reset for next test

 	// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
@@ -102,7 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update1, true)
 	require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
+	time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
 	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
 	p2p.BroadcastCalled.Store(false) // Reset for next test

@@ -117,8 +117,9 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update2, true)
 	require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
-	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
+	require.Eventually(t, func() bool {
+		return p2p.BroadcastCalled.Load()
+	}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update with supermajority")
 	p2p.BroadcastCalled.Store(false) // Reset for next test

 	// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
@@ -132,7 +133,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update3, true)
 	require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
+	time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
 	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been when previous was already broadcast")

 	// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
@@ -146,8 +147,9 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update4, true)
 	require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
-	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
+	require.Eventually(t, func() bool {
+		return p2p.BroadcastCalled.Load()
+	}, time.Second, 10*time.Millisecond, "Broadcast should have been called after a new finality update with increased finality slot")
 	p2p.BroadcastCalled.Store(false) // Reset for next test

 	// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
@@ -161,7 +163,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update5, true)
 	require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
+	time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
 	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

 	// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
@@ -175,7 +177,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

 	lcStore.SetLastFinalityUpdate(update6, true)
 	require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
-	time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
+	time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
 	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
 }
@@ -72,7 +72,10 @@ func TestService_Broadcast(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Async listen for the pubsub, must be before the broadcast.
 	var wg sync.WaitGroup
@@ -186,7 +189,10 @@ func TestService_BroadcastAttestation(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Async listen for the pubsub, must be before the broadcast.
 	var wg sync.WaitGroup
@@ -375,7 +381,15 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
 	_, err = tpHandle.Subscribe()
 	require.NoError(t, err)

-	time.Sleep(500 * time.Millisecond) // libp2p fails without this delay...
+	// This test specifically tests discovery-based peer finding, which requires
+	// time for nodes to discover each other. Using a fixed sleep here is intentional
+	// as we're testing the discovery timing behavior.
+	time.Sleep(500 * time.Millisecond)
+
+	// Verify mesh establishment after discovery
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0 && len(p2.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	nodePeers := p.pubsub.ListPeers(topic)
 	nodePeers2 := p2.pubsub.ListPeers(topic)
@@ -444,7 +458,10 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Async listen for the pubsub, must be before the broadcast.
 	var wg sync.WaitGroup
@@ -521,7 +538,10 @@ func TestService_BroadcastBlob(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Async listen for the pubsub, must be before the broadcast.
 	var wg sync.WaitGroup
@@ -584,7 +604,10 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Async listen for the pubsub, must be before the broadcast.
 	var wg sync.WaitGroup
@@ -660,7 +683,10 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(p.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Async listen for the pubsub, must be before the broadcast.
 	var wg sync.WaitGroup
@@ -771,8 +797,10 @@ func TestService_BroadcastDataColumn(t *testing.T) {
 	sub, err := p2.SubscribeToTopic(topic)
 	require.NoError(t, err)

-	// libp2p fails without this delay
-	time.Sleep(50 * time.Millisecond)
+	// Wait for libp2p mesh to establish
+	require.Eventually(t, func() bool {
+		return len(service.pubsub.ListPeers(topic)) > 0
+	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Broadcast to peers and wait.
 	err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
@@ -482,12 +482,12 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
 		s.Start()
 		<-exitRoutine
 	}()
-	time.Sleep(50 * time.Millisecond)
+	time.Sleep(50 * time.Millisecond) // Wait for service initialization
 	var vr [32]byte
 	require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
-	time.Sleep(4 * time.Second)
-	ps := s.host.Network().Peers()
-	assert.Equal(t, 5, len(ps), "Not all peers added to peerstore")
+	require.Eventually(t, func() bool {
+		return len(s.host.Network().Peers()) == 5
+	}, 10*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")
 	require.NoError(t, s.Stop())
 	exitRoutine <- true
 }
@@ -80,8 +80,9 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
 	}()
 	var vr [32]byte
 	require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
-	time.Sleep(time.Second * 2)
-	assert.Equal(t, true, s.started, "Expected service to be started")
+	require.Eventually(t, func() bool {
+		return s.started
+	}, 5*time.Second, 100*time.Millisecond, "Expected service to be started")
 	s.Start()
 	require.LogsContain(t, hook, "Attempted to start p2p service when it was already started")
 	require.NoError(t, s.Stop())
@@ -260,17 +261,9 @@ func TestListenForNewNodes(t *testing.T) {
 	err = cs.SetClock(startup.NewClock(genesisTime, gvr))
 	require.NoError(t, err, "Could not set clock in service")

-	actualPeerCount := len(s.host.Network().Peers())
-	for range 40 {
-		if actualPeerCount == peerCount {
-			break
-		}
-
-		time.Sleep(100 * time.Millisecond)
-		actualPeerCount = len(s.host.Network().Peers())
-	}
-
-	assert.Equal(t, peerCount, actualPeerCount, "Not all peers added to peerstore")
+	require.Eventually(t, func() bool {
+		return len(s.host.Network().Peers()) == peerCount
+	}, 5*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")

 	err = s.Stop()
 	require.NoError(t, err, "Failed to stop service")
@@ -657,8 +657,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
 		assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
 		assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
 		assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
-		time.Sleep(100 * time.Millisecond) // Wait for async pool save
-		assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
+		require.Eventually(t, func() bool {
+			return s.AttestationsPool.UnaggregatedAttestationCount() == 1
+		}, time.Second, 10*time.Millisecond, "Expected 1 attestation in pool")
 	})
 	t.Run("multiple", func(t *testing.T) {
 		broadcaster := &p2pMock.MockBroadcaster{}
@@ -677,8 +678,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
 		assert.Equal(t, http.StatusOK, writer.Code)
 		assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
 		assert.Equal(t, 2, broadcaster.NumAttestations())
-		time.Sleep(100 * time.Millisecond) // Wait for async pool save
-		assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
+		require.Eventually(t, func() bool {
+			return s.AttestationsPool.UnaggregatedAttestationCount() == 2
+		}, time.Second, 10*time.Millisecond, "Expected 2 attestations in pool")
 	})
 	t.Run("phase0 att post electra", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
@@ -798,8 +800,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
 		assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
 		assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
 		assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
-		time.Sleep(100 * time.Millisecond) // Wait for async pool save
-		assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
+		require.Eventually(t, func() bool {
+			return s.AttestationsPool.UnaggregatedAttestationCount() == 1
+		}, time.Second, 10*time.Millisecond, "Expected 1 attestation in pool")
 	})
 	t.Run("multiple", func(t *testing.T) {
 		broadcaster := &p2pMock.MockBroadcaster{}
@@ -818,8 +821,9 @@ func TestSubmitAttestationsV2(t *testing.T) {
 		assert.Equal(t, http.StatusOK, writer.Code)
 		assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
 		assert.Equal(t, 2, broadcaster.NumAttestations())
-		time.Sleep(100 * time.Millisecond) // Wait for async pool save
-		assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
+		require.Eventually(t, func() bool {
+			return s.AttestationsPool.UnaggregatedAttestationCount() == 2
+		}, time.Second, 10*time.Millisecond, "Expected 2 attestations in pool")
 	})
 	t.Run("no body", func(t *testing.T) {
 		request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
@@ -1375,9 +1379,9 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
 	writer.Body = &bytes.Buffer{}
 	s.SubmitBLSToExecutionChanges(writer, request)
 	assert.Equal(t, http.StatusOK, writer.Code)
-	time.Sleep(100 * time.Millisecond) // Delay to let the routine start
-	assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
-	assert.Equal(t, numValidators, len(broadcaster.BroadcastMessages))
+	require.Eventually(t, func() bool {
+		return broadcaster.BroadcastCalled.Load() && len(broadcaster.BroadcastMessages) == numValidators
+	}, time.Second, 10*time.Millisecond, "Broadcast should be called with all messages")

 	poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
 	require.Equal(t, len(poolChanges), len(signedChanges))
@@ -1591,10 +1595,10 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {

 	s.SubmitBLSToExecutionChanges(writer, request)
 	assert.Equal(t, http.StatusBadRequest, writer.Code)
-	time.Sleep(10 * time.Millisecond) // Delay to allow the routine to start
 	require.StringContains(t, "One or more messages failed validation", writer.Body.String())
-	assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
-	assert.Equal(t, numValidators, len(broadcaster.BroadcastMessages)+1)
+	require.Eventually(t, func() bool {
+		return broadcaster.BroadcastCalled.Load() && len(broadcaster.BroadcastMessages)+1 == numValidators
+	}, time.Second, 10*time.Millisecond, "Broadcast should be called with expected messages")

 	poolChanges, err := s.BLSChangesPool.PendingBLSToExecChanges()
 	require.Equal(t, len(poolChanges)+1, len(signedChanges))
@@ -70,7 +70,6 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {

 	topic := "/eth2/%x/beacon_block"
 	go r.startDiscoveryAndSubscriptions()
-	time.Sleep(100 * time.Millisecond)

 	var vr [32]byte
 	require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
@@ -83,9 +82,11 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
 	msg.Block.ParentRoot = util.Random32Bytes(t)
 	msg.Signature = sk.Sign([]byte("data")).Marshal()
 	p2p.ReceivePubSub(topic, msg)
-	// wait for chainstart to be sent
-	time.Sleep(400 * time.Millisecond)
-	require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
+
+	// Wait for chainstart event to be processed
+	require.Eventually(t, func() bool {
+		return r.chainStarted.IsSet()
+	}, 5*time.Second, 50*time.Millisecond, "Did not receive chain start event.")
 }

 func TestSyncHandlers_WaitForChainStart(t *testing.T) {
@@ -217,20 +218,18 @@ func TestSyncService_StopCleanly(t *testing.T) {
 	p2p.Digest, err = r.currentForkDigest()
 	require.NoError(t, err)

-	// wait for chainstart to be sent
-	time.Sleep(2 * time.Second)
-	require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
-
-	require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
-	require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
+	// Wait for chainstart and topics to be registered
+	require.Eventually(t, func() bool {
+		return r.chainStarted.IsSet() && len(r.cfg.p2p.PubSub().GetTopics()) > 0 && len(r.cfg.p2p.Host().Mux().Protocols()) > 0
+	}, 5*time.Second, 50*time.Millisecond, "Did not receive chain start event or topics not registered.")

 	// Both pubsub and rpc topics should be unsubscribed.
 	require.NoError(t, r.Stop())

-	// Sleep to allow pubsub topics to be deregistered.
-	time.Sleep(1 * time.Second)
-	require.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
-	require.Equal(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
+	// Wait for pubsub topics to be deregistered.
+	require.Eventually(t, func() bool {
+		return len(r.cfg.p2p.PubSub().GetTopics()) == 0 && len(r.cfg.p2p.Host().Mux().Protocols()) == 0
+	}, 5*time.Second, 50*time.Millisecond, "Pubsub topics were not deregistered")
 }

 func TestService_Stop_SendsGoodbyeMessages(t *testing.T) {
@@ -614,11 +614,10 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
 		},
 	}

-	time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers.
-	if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept {
-		_ = err
-		t.Fatal("Validated status is true")
-	}
+	require.Eventually(t, func() bool {
+		res, _ := r.validateAggregateAndProof(t.Context(), "", msg)
+		return res != pubsub.ValidationAccept
+	}, time.Second, 10*time.Millisecond, "Expected validation to reject duplicate aggregate")
 }

 func TestValidateAggregateAndProof_BadBlock(t *testing.T) {
@@ -992,7 +992,6 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {

 	// Mark the proposer/slot as seen
 	r.setSeenBlockIndexSlot(msg.Block.Slot, msg.Block.ProposerIndex)
-	time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers

 	// Prepare and validate the second message (clone)
 	buf := new(bytes.Buffer)
@@ -1010,9 +1009,11 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
 	}

 	// Since this is not an equivocation (same signature), it should be ignored
-	res, err := r.validateBeaconBlockPubSub(ctx, "", m)
-	assert.NoError(t, err)
-	assert.Equal(t, pubsub.ValidationIgnore, res, "block with same signature should be ignored")
+	// Wait for the cached value to propagate through buffers
+	require.Eventually(t, func() bool {
+		res, err := r.validateBeaconBlockPubSub(ctx, "", m)
+		return err == nil && res == pubsub.ValidationIgnore
+	}, time.Second, 10*time.Millisecond, "block with same signature should be ignored")

 	// Verify no slashings were created
 	assert.Equal(t, 0, len(slashingPool.PendingPropSlashings), "Expected no slashings for same signature")
changelog/james-prysm_is-ready.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Changed
+
+- Changed the validator client's IsHealthy check to IsReady when interpreting /eth/v1/node/health; a 206 response now returns false because the node is still syncing.

changelog/jrhea_duplicate_tracer_provider_setting.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Fixed
+
+- Don't call trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize) twice.

changelog/manu_parallelize_kzg_verification.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Changed
+
+- Parallelized KZG proof batch verification across CPU cores.

changelog/pvl_fix-flaky-tests-polling.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Changed
+
+- Replaced `time.Sleep` with `require.Eventually` polling in tests to fix flaky behavior caused by race conditions between goroutines and assertions.
@@ -45,7 +45,6 @@ func Setup(ctx context.Context, serviceName, processName, endpoint string, sampl
 			exporter,
 			trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
 			trace.WithBatchTimeout(trace.DefaultScheduleDelay*time.Millisecond),
-			trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
 		),
 		trace.WithResource(
 			resource.NewWithAttributes(
@@ -8,6 +8,7 @@ import (
 	"runtime"
 	"sort"
 	"strings"
+	"time"

 	"github.com/OffchainLabs/prysm/v7/encoding/ssz/equality"
 	"github.com/d4l3k/messagediff"
@@ -138,12 +139,21 @@ func StringContains(loggerFn assertionLoggerFn, expected, actual string, flag bo

 // NoError asserts that error is nil.
 func NoError(loggerFn assertionLoggerFn, err error, msg ...any) {
-	// reflect.ValueOf is needed for nil instances of custom types implementing Error
-	if err != nil && !reflect.ValueOf(err).IsNil() {
-		errMsg := parseMsg("Unexpected error", msg...)
-		_, file, line, _ := runtime.Caller(2)
-		loggerFn("%s:%d %s: %v", filepath.Base(file), line, errMsg, err)
-	}
+	if err == nil {
+		return
+	}
+	// reflect.ValueOf is needed for nil instances of custom types implementing Error.
+	// Only check IsNil for types that support it to avoid panics on struct types.
+	v := reflect.ValueOf(err)
+	switch v.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		if v.IsNil() {
+			return
+		}
+	}
+	errMsg := parseMsg("Unexpected error", msg...)
+	_, file, line, _ := runtime.Caller(2)
+	loggerFn("%s:%d %s: %v", filepath.Base(file), line, errMsg, err)
 }

 // ErrorIs uses Errors.Is to recursively unwrap err looking for target in the chain.
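The new Kind switch matters because reflect.Value.IsNil panics for kinds that can never be nil, so the old code would panic whenever a value-type (struct) error reached NoError. A standalone sketch of that failure mode, using a hypothetical statusErr type:

```go
// Standalone reproduction of the panic the Kind switch prevents; statusErr is
// a hypothetical value-type error, not a type from this repository.
package main

import (
	"fmt"
	"reflect"
)

type statusErr struct{ code int }

func (e statusErr) Error() string { return fmt.Sprintf("status %d", e.code) }

func main() {
	var err error = statusErr{code: 503}
	v := reflect.ValueOf(err) // Kind() == reflect.Struct
	// v.IsNil() // panics: reflect: call of reflect.Value.IsNil on struct Value
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
		fmt.Println("nil-able kind, IsNil is safe:", v.IsNil())
	default:
		fmt.Println("non-nil-able kind, treat error as non-nil:", err)
	}
}
```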
@@ -341,3 +351,18 @@ func (tb *TBMock) Errorf(format string, args ...any) {
 func (tb *TBMock) Fatalf(format string, args ...any) {
 	tb.FatalfMsg = fmt.Sprintf(format, args...)
 }
+
+// Eventually asserts that given condition will be met within waitFor time,
+// periodically checking target function each tick.
+func Eventually(loggerFn assertionLoggerFn, condition func() bool, waitFor, tick time.Duration, msg ...any) {
+	deadline := time.Now().Add(waitFor)
+	for time.Now().Before(deadline) {
+		if condition() {
+			return
+		}
+		time.Sleep(tick)
+	}
+	errMsg := parseMsg("Condition never satisfied", msg...)
+	_, file, line, _ := runtime.Caller(2)
+	loggerFn("%s:%d %s (waited %v)", filepath.Base(file), line, errMsg, waitFor)
+}
@@ -1,6 +1,8 @@
 package require

 import (
+	"time"
+
 	"github.com/OffchainLabs/prysm/v7/testing/assertions"
 	"github.com/sirupsen/logrus/hooks/test"
 )
@@ -87,3 +89,9 @@ func ErrorIs(tb assertions.AssertionTestingTB, err, target error, msg ...any) {
 func StringContains(tb assertions.AssertionTestingTB, expected, actual string, msg ...any) {
 	assertions.StringContains(tb.Fatalf, expected, actual, true, msg)
 }
+
+// Eventually asserts that given condition will be met within waitFor time,
+// periodically checking target function each tick.
+func Eventually(tb assertions.AssertionTestingTB, condition func() bool, waitFor, tick time.Duration, msg ...any) {
+	assertions.Eventually(tb.Fatalf, condition, waitFor, tick, msg...)
+}
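Call sites then follow the pattern used throughout the test diffs above. A self-contained sketch, assuming only the require package added here; the atomic flag stands in for any state mutated by a background goroutine:

```go
// A self-contained sketch of the polling pattern adopted in the test diffs.
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBackgroundWork(t *testing.T) {
	var done atomic.Bool
	go func() {
		time.Sleep(20 * time.Millisecond) // simulated async work
		done.Store(true)
	}()

	// Poll every tick instead of sleeping a fixed duration; the assertion only
	// fails if the condition is still false after the full waitFor window.
	require.Eventually(t, func() bool {
		return done.Load()
	}, time.Second, 10*time.Millisecond, "background goroutine never finished")
}
```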
testing/validator-mock/node_client_mock.go (generated, 12 lines changed)
@@ -56,18 +56,18 @@ func (mr *MockNodeClientMockRecorder) Genesis(arg0, arg1 any) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Genesis", reflect.TypeOf((*MockNodeClient)(nil).Genesis), arg0, arg1)
 }

-// IsHealthy mocks base method.
-func (m *MockNodeClient) IsHealthy(arg0 context.Context) bool {
+// IsReady mocks base method.
+func (m *MockNodeClient) IsReady(arg0 context.Context) bool {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "IsHealthy", arg0)
+	ret := m.ctrl.Call(m, "IsReady", arg0)
 	ret0, _ := ret[0].(bool)
 	return ret0
 }

-// IsHealthy indicates an expected call of IsHealthy.
-func (mr *MockNodeClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
+// IsReady indicates an expected call of IsReady.
+func (mr *MockNodeClientMockRecorder) IsReady(arg0 any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockNodeClient)(nil).IsHealthy), arg0)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsReady", reflect.TypeOf((*MockNodeClient)(nil).IsReady), arg0)
 }

 // Peers mocks base method.
@@ -2,6 +2,7 @@ package beacon_api

 import (
 	"context"
+	"net/http"
 	"strconv"

 	"github.com/OffchainLabs/prysm/v7/api/server/structs"
@@ -101,12 +102,17 @@ func (c *beaconApiNodeClient) Peers(ctx context.Context, in *empty.Empty) (*ethp
 	return nil, errors.New("beaconApiNodeClient.Peers is not implemented. To use a fallback client, pass a fallback client as the last argument of NewBeaconApiNodeClientWithFallback.")
 }

-func (c *beaconApiNodeClient) IsHealthy(ctx context.Context) bool {
-	if err := c.jsonRestHandler.Get(ctx, "/eth/v1/node/health", nil); err != nil {
+// IsReady returns true only if the node is fully synced (200 OK).
+// A 206 Partial Content response indicates the node is syncing and not ready.
+func (c *beaconApiNodeClient) IsReady(ctx context.Context) bool {
+	statusCode, err := c.jsonRestHandler.GetStatusCode(ctx, "/eth/v1/node/health")
+	if err != nil {
 		log.WithError(err).Error("failed to get health of node")
 		return false
 	}
-	return true
+	// Only 200 OK means the node is fully synced and ready.
+	// 206 Partial Content means syncing, 503 means unavailable.
+	return statusCode == http.StatusOK
 }

 func NewNodeClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
@@ -2,6 +2,7 @@ package beacon_api

 import (
 	"errors"
+	"net/http"
 	"testing"

 	"github.com/OffchainLabs/prysm/v7/api/server/structs"
@@ -287,3 +288,59 @@ func TestGetVersion(t *testing.T) {
 		})
 	}
 }
+
+func TestIsReady(t *testing.T) {
+	const healthEndpoint = "/eth/v1/node/health"
+
+	testCases := []struct {
+		name           string
+		statusCode     int
+		err            error
+		expectedResult bool
+	}{
+		{
+			name:           "returns true for 200 OK (fully synced)",
+			statusCode:     http.StatusOK,
+			expectedResult: true,
+		},
+		{
+			name:           "returns false for 206 Partial Content (syncing)",
+			statusCode:     http.StatusPartialContent,
+			expectedResult: false,
+		},
+		{
+			name:           "returns false for 503 Service Unavailable",
+			statusCode:     http.StatusServiceUnavailable,
+			expectedResult: false,
+		},
+		{
+			name:           "returns false for 500 Internal Server Error",
+			statusCode:     http.StatusInternalServerError,
+			expectedResult: false,
+		},
+		{
+			name:           "returns false on error",
+			err:            errors.New("request failed"),
+			expectedResult: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			ctx := t.Context()
+
+			jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
+			jsonRestHandler.EXPECT().GetStatusCode(
+				gomock.Any(),
+				healthEndpoint,
+			).Return(tc.statusCode, tc.err)
+
+			nodeClient := &beaconApiNodeClient{jsonRestHandler: jsonRestHandler}
+			result := nodeClient.IsReady(ctx)
+
+			assert.Equal(t, tc.expectedResult, result)
+		})
+	}
+}
@@ -1,9 +1,9 @@
 // Code generated by MockGen. DO NOT EDIT.
-// Source: validator/client/beacon-api/json_rest_handler.go
+// Source: validator/client/beacon-api/rest_handler_client.go
 //
 // Generated by this command:
 //
-//	mockgen -package=mock -source=validator/client/beacon-api/json_rest_handler.go -destination=validator/client/beacon-api/mock/json_rest_handler_mock.go
+//	mockgen -package=mock -source=validator/client/beacon-api/rest_handler_client.go -destination=validator/client/beacon-api/mock/json_rest_handler_mock.go RestHandler
 //

 // Package mock is a generated GoMock package.
@@ -18,32 +18,37 @@ import (
 	gomock "go.uber.org/mock/gomock"
 )

-// MockJsonRestHandler is a mock of JsonRestHandler interface.
-type MockJsonRestHandler struct {
+// Backward compatibility aliases for the renamed mock type.
+type MockJsonRestHandler = MockRestHandler
+type MockJsonRestHandlerMockRecorder = MockRestHandlerMockRecorder
+
+var NewMockJsonRestHandler = NewMockRestHandler
+
+// MockRestHandler is a mock of RestHandler interface.
+type MockRestHandler struct {
 	ctrl     *gomock.Controller
-	recorder *MockJsonRestHandlerMockRecorder
-	isgomock struct{}
+	recorder *MockRestHandlerMockRecorder
 }

-// MockJsonRestHandlerMockRecorder is the mock recorder for MockJsonRestHandler.
-type MockJsonRestHandlerMockRecorder struct {
-	mock *MockJsonRestHandler
+// MockRestHandlerMockRecorder is the mock recorder for MockRestHandler.
+type MockRestHandlerMockRecorder struct {
+	mock *MockRestHandler
 }

-// NewMockJsonRestHandler creates a new mock instance.
-func NewMockJsonRestHandler(ctrl *gomock.Controller) *MockJsonRestHandler {
-	mock := &MockJsonRestHandler{ctrl: ctrl}
-	mock.recorder = &MockJsonRestHandlerMockRecorder{mock}
+// NewMockRestHandler creates a new mock instance.
+func NewMockRestHandler(ctrl *gomock.Controller) *MockRestHandler {
+	mock := &MockRestHandler{ctrl: ctrl}
+	mock.recorder = &MockRestHandlerMockRecorder{mock}
 	return mock
 }

 // EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockJsonRestHandler) EXPECT() *MockJsonRestHandlerMockRecorder {
+func (m *MockRestHandler) EXPECT() *MockRestHandlerMockRecorder {
 	return m.recorder
 }

 // Get mocks base method.
-func (m *MockJsonRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
+func (m *MockRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Get", ctx, endpoint, resp)
 	ret0, _ := ret[0].(error)
@@ -51,13 +56,13 @@ func (m *MockJsonRestHandler) Get(ctx context.Context, endpoint string, resp any
 }

 // Get indicates an expected call of Get.
-func (mr *MockJsonRestHandlerMockRecorder) Get(ctx, endpoint, resp any) *gomock.Call {
+func (mr *MockRestHandlerMockRecorder) Get(ctx, endpoint, resp any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockJsonRestHandler)(nil).Get), ctx, endpoint, resp)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockRestHandler)(nil).Get), ctx, endpoint, resp)
 }

 // GetSSZ mocks base method.
-func (m *MockJsonRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
+func (m *MockRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "GetSSZ", ctx, endpoint)
 	ret0, _ := ret[0].([]byte)
@@ -67,13 +72,28 @@ func (m *MockJsonRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]by
 }

 // GetSSZ indicates an expected call of GetSSZ.
-func (mr *MockJsonRestHandlerMockRecorder) GetSSZ(ctx, endpoint any) *gomock.Call {
+func (mr *MockRestHandlerMockRecorder) GetSSZ(ctx, endpoint any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSZ", reflect.TypeOf((*MockJsonRestHandler)(nil).GetSSZ), ctx, endpoint)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSZ", reflect.TypeOf((*MockRestHandler)(nil).GetSSZ), ctx, endpoint)
 }

+// GetStatusCode mocks base method.
+func (m *MockRestHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetStatusCode", ctx, endpoint)
+	ret0, _ := ret[0].(int)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetStatusCode indicates an expected call of GetStatusCode.
+func (mr *MockRestHandlerMockRecorder) GetStatusCode(ctx, endpoint any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatusCode", reflect.TypeOf((*MockRestHandler)(nil).GetStatusCode), ctx, endpoint)
+}
+
 // Host mocks base method.
-func (m *MockJsonRestHandler) Host() string {
+func (m *MockRestHandler) Host() string {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Host")
 	ret0, _ := ret[0].(string)
@@ -81,13 +101,13 @@ func (m *MockJsonRestHandler) Host() string {
 }

 // Host indicates an expected call of Host.
-func (mr *MockJsonRestHandlerMockRecorder) Host() *gomock.Call {
+func (mr *MockRestHandlerMockRecorder) Host() *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Host", reflect.TypeOf((*MockJsonRestHandler)(nil).Host))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Host", reflect.TypeOf((*MockRestHandler)(nil).Host))
 }

 // HttpClient mocks base method.
-func (m *MockJsonRestHandler) HttpClient() *http.Client {
+func (m *MockRestHandler) HttpClient() *http.Client {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "HttpClient")
 	ret0, _ := ret[0].(*http.Client)
@@ -95,13 +115,13 @@ func (m *MockJsonRestHandler) HttpClient() *http.Client {
 }

 // HttpClient indicates an expected call of HttpClient.
-func (mr *MockJsonRestHandlerMockRecorder) HttpClient() *gomock.Call {
+func (mr *MockRestHandlerMockRecorder) HttpClient() *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HttpClient", reflect.TypeOf((*MockJsonRestHandler)(nil).HttpClient))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HttpClient", reflect.TypeOf((*MockRestHandler)(nil).HttpClient))
 }

 // Post mocks base method.
-func (m *MockJsonRestHandler) Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error {
+func (m *MockRestHandler) Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Post", ctx, endpoint, headers, data, resp)
 	ret0, _ := ret[0].(error)
@@ -109,13 +129,13 @@ func (m *MockJsonRestHandler) Post(ctx context.Context, endpoint string, headers
 }

 // Post indicates an expected call of Post.
-func (mr *MockJsonRestHandlerMockRecorder) Post(ctx, endpoint, headers, data, resp any) *gomock.Call {
+func (mr *MockRestHandlerMockRecorder) Post(ctx, endpoint, headers, data, resp any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Post", reflect.TypeOf((*MockJsonRestHandler)(nil).Post), ctx, endpoint, headers, data, resp)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Post", reflect.TypeOf((*MockRestHandler)(nil).Post), ctx, endpoint, headers, data, resp)
 }

-// Post mocks base method.
-func (m *MockJsonRestHandler) PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error) {
+// PostSSZ mocks base method.
+func (m *MockRestHandler) PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "PostSSZ", ctx, endpoint, headers, data)
 	ret0, _ := ret[0].([]byte)
@@ -124,20 +144,20 @@ func (m *MockJsonRestHandler) PostSSZ(ctx context.Context, endpoint string, head
 	return ret0, ret1, ret2
 }

-// Post indicates an expected call of Post.
-func (mr *MockJsonRestHandlerMockRecorder) PostSSZ(ctx, endpoint, headers, data any) *gomock.Call {
+// PostSSZ indicates an expected call of PostSSZ.
+func (mr *MockRestHandlerMockRecorder) PostSSZ(ctx, endpoint, headers, data any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostSSZ", reflect.TypeOf((*MockJsonRestHandler)(nil).PostSSZ), ctx, endpoint, headers, data)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostSSZ", reflect.TypeOf((*MockRestHandler)(nil).PostSSZ), ctx, endpoint, headers, data)
 }

 // SetHost mocks base method.
-func (m *MockJsonRestHandler) SetHost(host string) {
+func (m *MockRestHandler) SetHost(host string) {
 	m.ctrl.T.Helper()
 	m.ctrl.Call(m, "SetHost", host)
 }

 // SetHost indicates an expected call of SetHost.
-func (mr *MockJsonRestHandlerMockRecorder) SetHost(host any) *gomock.Call {
+func (mr *MockRestHandlerMockRecorder) SetHost(host any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHost", reflect.TypeOf((*MockJsonRestHandler)(nil).SetHost), host)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHost", reflect.TypeOf((*MockRestHandler)(nil).SetHost), host)
 }
@@ -23,6 +23,7 @@ type reqOption func(*http.Request)

 type RestHandler interface {
 	Get(ctx context.Context, endpoint string, resp any) error
+	GetStatusCode(ctx context.Context, endpoint string) (int, error)
 	GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
 	Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error
 	PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
@@ -90,6 +91,28 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp an
 	return decodeResp(httpResp, resp)
 }

+// GetStatusCode sends a GET request and returns only the HTTP status code.
+// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
+// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
+func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
+	url := c.host + endpoint
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return 0, errors.Wrapf(err, "failed to create request for endpoint %s", url)
+	}
+	req.Header.Set("User-Agent", version.BuildData())
+	httpResp, err := c.client.Do(req)
+	if err != nil {
+		return 0, errors.Wrapf(err, "failed to perform request for endpoint %s", url)
+	}
+	defer func() {
+		if err := httpResp.Body.Close(); err != nil {
+			return
+		}
+	}()
+	return httpResp.StatusCode, nil
+}
+
 func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
 	url := c.host + endpoint
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
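For reference, the contract GetStatusCode surfaces is carried entirely by the status code of /eth/v1/node/health, not by any response body. A hedged sketch with plain net/http; the localhost URL is illustrative:

```go
// Sketch: interpreting /eth/v1/node/health by status code alone.
// The endpoint URL is illustrative; error handling is abbreviated.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v1/node/health")
	if err != nil {
		fmt.Println("node unreachable:", err)
		return
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK: // 200: synced and ready
		fmt.Println("ready")
	case http.StatusPartialContent: // 206: syncing
		fmt.Println("syncing")
	case http.StatusServiceUnavailable: // 503: not initialized or unavailable
		fmt.Println("unavailable")
	default:
		fmt.Println("unexpected status:", resp.StatusCode)
	}
}
```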
@@ -359,3 +359,66 @@ func Test_decodeResp(t *testing.T) {
 		assert.ErrorContains(t, "HTTP request unsuccessful (500: foo)", err)
 	})
 }
+
+func TestGetStatusCode(t *testing.T) {
+	ctx := t.Context()
+	const endpoint = "/eth/v1/node/health"
+
+	testCases := []struct {
+		name               string
+		serverStatusCode   int
+		expectedStatusCode int
+	}{
+		{
+			name:               "returns 200 OK",
+			serverStatusCode:   http.StatusOK,
+			expectedStatusCode: http.StatusOK,
+		},
+		{
+			name:               "returns 206 Partial Content",
+			serverStatusCode:   http.StatusPartialContent,
+			expectedStatusCode: http.StatusPartialContent,
+		},
+		{
+			name:               "returns 503 Service Unavailable",
+			serverStatusCode:   http.StatusServiceUnavailable,
+			expectedStatusCode: http.StatusServiceUnavailable,
+		},
+		{
+			name:               "returns 500 Internal Server Error",
+			serverStatusCode:   http.StatusInternalServerError,
+			expectedStatusCode: http.StatusInternalServerError,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mux := http.NewServeMux()
+			mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
+				assert.Equal(t, version.BuildData(), r.Header.Get("User-Agent"))
+				w.WriteHeader(tc.serverStatusCode)
+			})
+			server := httptest.NewServer(mux)
+			defer server.Close()
+
+			jsonRestHandler := BeaconApiRestHandler{
+				client: http.Client{Timeout: time.Second * 5},
+				host:   server.URL,
+			}
+
+			statusCode, err := jsonRestHandler.GetStatusCode(ctx, endpoint)
+			require.NoError(t, err)
+			assert.Equal(t, tc.expectedStatusCode, statusCode)
+		})
+	}
+
+	t.Run("returns error on connection failure", func(t *testing.T) {
+		jsonRestHandler := BeaconApiRestHandler{
+			client: http.Client{Timeout: time.Millisecond * 100},
+			host:   "http://localhost:99999", // Invalid port
+		}
+
+		_, err := jsonRestHandler.GetStatusCode(ctx, endpoint)
+		require.ErrorContains(t, "failed to perform request", err)
+	})
+}
@@ -33,7 +33,7 @@ func (c *grpcNodeClient) Peers(ctx context.Context, in *empty.Empty) (*ethpb.Pee
 	return c.nodeClient.ListPeers(ctx, in)
 }

-func (c *grpcNodeClient) IsHealthy(ctx context.Context) bool {
+func (c *grpcNodeClient) IsReady(ctx context.Context) bool {
 	_, err := c.nodeClient.GetHealth(ctx, &ethpb.HealthRequest{})
 	if err != nil {
 		log.WithError(err).Error("Failed to get health of node")
@@ -12,5 +12,5 @@ type NodeClient interface {
 	Genesis(ctx context.Context, in *empty.Empty) (*ethpb.Genesis, error)
 	Version(ctx context.Context, in *empty.Empty) (*ethpb.Version, error)
 	Peers(ctx context.Context, in *empty.Empty) (*ethpb.Peers, error)
-	IsHealthy(ctx context.Context) bool
+	IsReady(ctx context.Context) bool
 }
@@ -1274,7 +1274,7 @@ func (v *validator) FindHealthyHost(ctx context.Context) bool {
 	// Tail-recursive closure keeps retry count private.
 	var check func(remaining int) bool
 	check = func(remaining int) bool {
-		if v.nodeClient.IsHealthy(ctx) { // healthy → done
+		if v.nodeClient.IsReady(ctx) { // ready → done
 			return true
 		}
 		if len(v.beaconNodeHosts) == 1 && features.Get().EnableBeaconRESTApi {