mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-01 00:28:16 -05:00)

Compare commits: `develop...event-stre`, 1 commit (cde4f3b009)
```diff
@@ -34,6 +34,18 @@ type Event struct {
 	Data []byte
 }
 
+// PublishEvent enqueues an event without blocking the producer. If the channel is full,
+// the event is dropped since only the most recent heads are relevant.
+func PublishEvent(eventsChannel chan<- *Event, event *Event) {
+	if eventsChannel == nil || event == nil {
+		return
+	}
+	select {
+	case eventsChannel <- event:
+	default:
+	}
+}
+
 // EventStream is responsible for subscribing to the Beacon API events endpoint
 // and dispatching received events to subscribers.
 type EventStream struct {
```
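The new helper makes dispatch non-blocking: when the buffer is full, the event is dropped instead of stalling the producer. A minimal standalone sketch of that behavior (types inlined here for illustration; this is not code from the commit):

```go
package main

import "fmt"

type Event struct {
	EventType string
	Data      []byte
}

// Same non-blocking pattern as the PublishEvent added above.
func PublishEvent(ch chan<- *Event, ev *Event) {
	if ch == nil || ev == nil {
		return
	}
	select {
	case ch <- ev:
	default: // buffer full: drop, never block the producer
	}
}

func main() {
	ch := make(chan *Event, 1)
	PublishEvent(ch, &Event{EventType: "head"})
	PublishEvent(ch, &Event{EventType: "head"}) // dropped: buffer already full
	fmt.Println(len(ch))                        // 1
}
```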
```diff
@@ -67,19 +79,20 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
 	fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
 	req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
 	if err != nil {
-		eventsChannel <- &Event{
+		PublishEvent(eventsChannel, &Event{
 			EventType: EventConnectionError,
 			Data:      []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
-		}
+		})
 		return
 	}
 	req.Header.Set("Accept", api.EventStreamMediaType)
 	req.Header.Set("Connection", api.KeepAlive)
 	resp, err := h.httpClient.Do(req)
 	if err != nil {
-		eventsChannel <- &Event{
+		PublishEvent(eventsChannel, &Event{
 			EventType: EventConnectionError,
 			Data:      []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
-		}
+		})
 		return
 	}
@@ -100,7 +113,6 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
 		select {
 		case <-h.ctx.Done():
 			log.Info("Context canceled, stopping event stream")
-			close(eventsChannel)
 			return
 		default:
 			line := scanner.Text()
@@ -109,7 +121,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
 			// Empty line indicates the end of an event
 			if eventType != "" && data != "" {
 				// Process the event when both eventType and data are set
-				eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
+				PublishEvent(eventsChannel, &Event{EventType: eventType, Data: []byte(data)})
 			}
 
 			// Reset eventType and data for the next event
@@ -130,9 +142,9 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
 	}
 
 	if err := scanner.Err(); err != nil {
-		eventsChannel <- &Event{
+		PublishEvent(eventsChannel, &Event{
 			EventType: EventConnectionError,
 			Data:      []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
-		}
+		})
 	}
 }
```
```diff
@@ -53,7 +53,7 @@ func TestEventStream(t *testing.T) {
 	defer server.Close()
 
 	topics := []string{"head"}
-	eventsChannel := make(chan *Event, 1)
+	eventsChannel := make(chan *Event, 4)
 	stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, topics)
 	require.NoError(t, err)
 	go stream.Subscribe(eventsChannel)
@@ -80,7 +80,7 @@ func TestEventStream(t *testing.T) {
 
 func TestEventStreamRequestError(t *testing.T) {
 	topics := []string{"head"}
-	eventsChannel := make(chan *Event, 1)
+	eventsChannel := make(chan *Event, 4)
 	ctx := t.Context()
 
 	// use valid url that will result in failed request with nil body
```
```diff
@@ -1,3 +0,0 @@
-### Added
-
-- Added README for maintaining specrefs.
```

```diff
@@ -1,3 +0,0 @@
-### Added
-
-- The ability to download the nightly reference tests from a specific day.
```

```diff
@@ -1,3 +0,0 @@
-### Ignored
-
-- Add handy documentation for SSZ Query package (`encoding/ssz/query`).
```
@@ -1,190 +0,0 @@

_This commit deletes the SSZ Query package README; its former content follows._

# SSZ Query Package

The `encoding/ssz/query` package provides a system for analyzing and querying SSZ ([Simple Serialize](https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md)) data structures, as well as generating Merkle proofs from them. It enables runtime analysis of SSZ-serialized Go objects with reflection, path-based queries through nested structures, generalized index calculation, and Merkle proof generation.

This package is designed to be generic. It operates on arbitrary SSZ-serialized Go values at runtime, so the same query/proof machinery applies equally to any SSZ type, including the BeaconState and BeaconBlock.

## Usage Example

```go
// 1. Analyze an SSZ object
block := &ethpb.BeaconBlock{...}
info, err := query.AnalyzeObject(block)

// 2. Parse a path
path, err := query.ParsePath(".body.attestations[0].data.slot")

// 3. Get the generalized index
gindex, err := query.GetGeneralizedIndexFromPath(info, path)

// 4. Generate a Merkle proof
proof, err := info.Prove(gindex)

// 5. Get offset and length to slice the SSZ-encoded bytes
sszBytes, _ := block.MarshalSSZ()
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
// slotBytes contains the SSZ-encoded value at the queried path
slotBytes := sszBytes[offset : offset+length]
```
## Exported API

The main exported API consists of:

```go
// AnalyzeObject analyzes an SSZ object and returns its structural information
func AnalyzeObject(obj SSZObject) (*SszInfo, error)

// ParsePath parses a path string like ".field1.field2[0].field3"
func ParsePath(rawPath string) (Path, error)

// CalculateOffsetAndLength computes byte offset and length for a path within an SSZ object
func CalculateOffsetAndLength(sszInfo *SszInfo, path Path) (*SszInfo, uint64, uint64, error)

// GetGeneralizedIndexFromPath calculates the generalized index for a given path
func GetGeneralizedIndexFromPath(info *SszInfo, path Path) (uint64, error)

// Prove generates a Merkle proof for a target generalized index
func (s *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error)
```

## Type System

### SSZ Types

The package supports [all standard SSZ types](https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#typing) except `ProgressiveList`, `ProgressiveContainer`, `ProgressiveBitlist`, `Union`, and `CompatibleUnion`.
### Core Data Structures

#### `SszInfo`

The `SszInfo` structure contains complete structural metadata for an SSZ type:

```go
type SszInfo struct {
	sszType    SszType      // SSZ type classification
	typ        reflect.Type // Go reflect.Type
	source     SSZObject    // Original SSZObject reference, kept mostly to reuse SSZ methods like `HashTreeRoot`.
	isVariable bool         // True if the type contains variable-size fields

	// Composite types carry the matching metadata below; only the field for
	// the current type is non-nil, the rest stay nil.
	containerInfo *containerInfo
	listInfo      *listInfo
	vectorInfo    *vectorInfo
	bitlistInfo   *bitlistInfo
	bitvectorInfo *bitvectorInfo
}
```
#### `Path`

The `Path` structure represents navigation paths through SSZ structures. It supports accessing a field by name, accessing an element by index (for list/vector types), and querying the length of homogeneous collection types. The `ParsePath` function parses a raw string into a `Path` instance, which is then passed to other APIs such as `CalculateOffsetAndLength` and `GetGeneralizedIndexFromPath`.

```go
type Path struct {
	Length   bool          // Flag for length queries (e.g., len(.field))
	Elements []PathElement // Sequence of field accesses and indices
}

type PathElement struct {
	Name  string  // Field name
	Index *uint64 // list/vector index (nil if not an index access)
}
```
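For instance, a length query parses into a `Path` with `Length` set; the field name below is hypothetical:

```go
// "len(...)" sets the Length flag rather than adding a path element.
path, _ := query.ParsePath("len(.block_roots)")
// Returns: Path{Length: true, Elements: [PathElement{Name: "block_roots"}]}
```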
## Implementation Details

### Type Analysis (`analyzer.go`)

The `AnalyzeObject` function performs recursive type introspection using Go reflection:

1. **Type Inspection** - Examines the Go `reflect.Value` to determine the SSZ type (a tag sketch follows this list)
   - Basic types (`uint8`, `uint16`, `uint32`, `uint64`, `bool`): mapped to `SSZType` constants
   - Slices: determined from struct tags (`ssz-size` for vectors, `ssz-max` for lists). There is a related [write-up](https://hackmd.io/@junsong/H101DKnwxl) regarding struct tags.
   - Structs: analyzed as Containers with field ordering from JSON tags
   - Pointers: dereferenced automatically

2. **Variable-Length Population** - Determines actual sizes at runtime
   - For lists: iterates elements, caches sizes for variable-element lists
   - For containers: recursively populates variable fields, adjusts offsets
   - For bitlists: decodes the bit length from the bitvector

3. **Offset Calculation** - Computes byte positions within serialized data
   - Fixed-size fields: offset = sum of preceding field sizes
   - Variable-size fields: offset stored as 4-byte pointer entries
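A hedged illustration of the tag rule in step 1 (the struct and field names are hypothetical, not Prysm types):

```go
// Hypothetical example: how the analyzer classifies slices by struct tag.
type Example struct {
	// ssz-size marks a fixed-length collection, so this is an SSZ Vector
	// of 16 elements, each a 32-byte root.
	Roots [][]byte `ssz-size:"16,32"`
	// ssz-max marks a bounded, variable-length collection, so this is an
	// SSZ List with capacity 2048.
	Indices []uint64 `ssz-max:"2048"`
}
```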
### Path Parsing (`path.go`)

The `ParsePath` function parses path strings with the following rules:

- **Dot notation**: `.field1.field2` for field access
- **Array indexing**: `[0]`, `[42]` for element access
- **Length queries**: `len(.field)` for list/vector lengths
- **Character set**: only `[A-Za-z0-9._\[\]\(\)]` is allowed

Example:
```go
path, _ := ParsePath(".nested.array_field[5].inner_field")
// Returns: Path{
//   Elements: [
//     PathElement{Name: "nested"},
//     PathElement{Name: "array_field", Index: <pointer to uint64(5)>},
//     PathElement{Name: "inner_field"}
//   ]
// }
```
### Generalized Index Calculation (`generalized_index.go`)

The generalized index is a tree position identifier: the root is node 1, and node `i` has children `2i` and `2i+1`. This package follows the [Ethereum consensus-specs](https://github.com/ethereum/consensus-specs/blob/master/ssz/merkle-proofs.md#generalized-merkle-tree-index) to calculate the generalized index.
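The descent rule can be sketched as follows, a simplified reading of the consensus-specs formula (list length mix-ins are ignored here):

```go
// childGindex descends one level in the generalized-index tree, following
// the consensus-specs rule: index = parent * next_pow_of_two(chunks) + pos.
// Simplified sketch; length mix-ins for lists are not handled.
func childGindex(parent, pos, chunkCount uint64) uint64 {
	width := uint64(1)
	for width < chunkCount { // next power of two >= chunkCount
		width *= 2
	}
	return parent*width + pos
}
```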
### Merkle Proof Generation (`merkle_proof.go`, `proof_collector.go`)

The `Prove` method generates Merkle proofs using a single-sweep merkleization algorithm:

#### Algorithm Overview

**Key Terms:**

- **Target gindex** (generalized index): The position of the SSZ element you want to prove, expressed as a generalized Merkle tree index. Stored in `Proof.Index`.
  - Note: the generalized index of the root is 1.
- **Registered gindices**: The set of tree positions whose node hashes must be captured during merkleization in order to later assemble the proof.
- **Sibling node**: The node that shares the same parent as another node.
- **Leaf value**: The 32-byte hash of the target node (the node being proven). Stored in `Proof.Leaf`.

**Phases:**

1. **Registration Phase** (`addTarget`)
   > Goal: determine exactly which sibling hashes are needed for the proof.

   - Record the target gindex as the proof target.
   - Starting from the target node, walk the Merkle tree from the leaf (target gindex) to the root (gindex = 1).
   - At each step:
     - Compute and register the sibling gindex (`i XOR 1`) as "must collect".
     - Move to the parent (`i = i/2`).
   - This produces the full set of registered gindices (the sibling nodes on the target-to-root path); a short sketch of this walk appears after the phase list.
2. **Merkleization Phase** (`merkleize`)
   > Goal: recursively merkleize the tree and capture the needed hashes.

   - Recursively traverse the SSZ structure and compute Merkle tree node hashes from leaves to root.
   - Whenever the traversal computes a node whose gindex is in the registered gindices, store that node's hash for later proof construction.

3. **Proof Assembly Phase** (`toProof`)
   > Goal: create the final `fastssz.Proof` object in the correct format and order.

   ```go
   // Proof represents a merkle proof against a general index.
   type Proof struct {
   	Index  int
   	Leaf   []byte
   	Hashes [][]byte
   }
   ```

   - Set `Proof.Index` to the target gindex.
   - Set `Proof.Leaf` to the 32-byte hash of the target node.
   - Build `Proof.Hashes` by walking from the target node up to (but not including) the root:
     - At node `i`, append the stored hash for the sibling (`i XOR 1`).
     - Move to the parent (`i = i/2`).
   - The resulting `Proof.Hashes` is ordered from the target level upward, containing one sibling hash per tree level on the path to the root.
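A compact sketch of the sibling walk that drives both the registration phase and the `Proof.Hashes` ordering (a standalone helper, not the package's internal representation):

```go
// siblingPath lists the gindices whose hashes a proof for target needs,
// ordered from the target's level up to just below the root.
func siblingPath(target uint64) []uint64 {
	var path []uint64
	for i := target; i > 1; i /= 2 {
		path = append(path, i^1) // the sibling shares the parent with i
	}
	return path
}

// For target gindex 10 this yields [11, 4, 3]: one sibling per tree level.
```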
@@ -1,35 +0,0 @@

_This commit deletes the specrefs README; its former content follows._

# Specification References

This directory contains specification reference tracking files managed by
[ethspecify](https://github.com/jtraglia/ethspecify).

## Installation

Install `ethspecify` with the following command:

```bash
pipx install ethspecify
```

> [!NOTE]
> You can run `ethspecify <cmd>` in the `specrefs` directory or
> `ethspecify <cmd> --path=specrefs` from the project's root directory.

## Maintenance

When adding support for a new specification version, follow these steps:

0. Change directory into the `specrefs` directory.
1. Update the version in the `.ethspecify.yml` configuration.
2. Run `ethspecify process` to update/populate specrefs.
3. Run `ethspecify check` to check specrefs.
4. If there are errors, use the error message as a guide to fix the issue. If
   there are new specrefs with empty sources, implement/locate each item and
   update each specref source list. If you choose not to implement an item,
   add an exception to the appropriate section of the `.ethspecify.yml`
   configuration.
5. Repeat steps 3 and 4 until `ethspecify check` passes.
6. Run `git diff` to view updated specrefs. If an object/function/etc. has
   changed, make the necessary updates to the implementation.
7. Lastly, in the project's root directory, run `act -j check-specrefs` to
   ensure everything is correct.
````diff
@@ -21,14 +21,10 @@ There are tests for mainnet and minimal config, so for each config we will add a
 
 ## Running nightly spectests
 
-Since [PR 15312](https://github.com/OffchainLabs/prysm/pull/15312), Prysm has support to download "nightly" spectests from github via a starlark rule configuration by environment variable.
-Set `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly` or `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-<run_id>` when running spectest to download the "nightly" spectests.
-Note: A GITHUB_TOKEN environment variable is required to be set. The github token does not need to be associated with your main account; it can be from a "burner account". And the token does not need to be a fine-grained token; it can be a classic token.
+Since [PR 15312](https://github.com/OffchainLabs/prysm/pull/15312), Prysm has support to download "nightly" spectests from github via a starlark rule configuration by environment variable.
+Set `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly` when running spectest to download the "nightly" spectests.
+Note: A GITHUB_TOKEN environment variable is required to be set. The github token must be a [fine grained token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token).
 
 ```
 bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly
 ```
-
-```
-bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-21422848633
-```
````
```diff
@@ -1,6 +1,5 @@
 # bazel build @consensus_spec_tests//:test_data
 # bazel build @consensus_spec_tests//:test_data --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly
-# bazel build @consensus_spec_tests//:test_data --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-<run_id>
 
 def _get_redirected_url(repository_ctx, url, headers):
     if not repository_ctx.which("curl"):
@@ -25,7 +24,7 @@ def _impl(repository_ctx):
     version = repository_ctx.getenv("CONSENSUS_SPEC_TESTS_VERSION") or repository_ctx.attr.version
     token = repository_ctx.getenv("GITHUB_TOKEN") or ""
 
-    if version == "nightly" or version.startswith("nightly-"):
+    if version == "nightly":
         print("Downloading nightly tests")
         if not token:
             fail("Error GITHUB_TOKEN is not set")
@@ -35,22 +34,16 @@ def _impl(repository_ctx):
             "Accept": "application/vnd.github+json",
         }
 
-        if version.startswith("nightly-"):
-            run_id = version.split("nightly-", 1)[1]
-            if not run_id:
-                fail("Error invalid run id")
-        else:
-            repository_ctx.download(
-                "https://api.github.com/repos/%s/actions/workflows/%s/runs?branch=%s&status=success&per_page=1"
-                % (repository_ctx.attr.repo, repository_ctx.attr.workflow, repository_ctx.attr.branch),
-                headers = headers,
-                output = "runs.json"
-            )
+        repository_ctx.download(
+            "https://api.github.com/repos/%s/actions/workflows/%s/runs?branch=%s&status=success&per_page=1"
+            % (repository_ctx.attr.repo, repository_ctx.attr.workflow, repository_ctx.attr.branch),
+            headers = headers,
+            output = "runs.json"
+        )
 
-            run_id = json.decode(repository_ctx.read("runs.json"))["workflow_runs"][0]["id"]
-            repository_ctx.delete("runs.json")
+        run_id = json.decode(repository_ctx.read("runs.json"))["workflow_runs"][0]["id"]
+        repository_ctx.delete("runs.json")
 
         print("Run id:", run_id)
         repository_ctx.download(
             "https://api.github.com/repos/%s/actions/runs/%s/artifacts"
             % (repository_ctx.attr.repo, run_id),
@@ -115,8 +108,8 @@ consensus_spec_tests = repository_rule(
         "version": attr.string(mandatory = True),
         "flavors": attr.string_dict(mandatory = True),
         "repo": attr.string(default = "ethereum/consensus-specs"),
-        "workflow": attr.string(default = "nightly-reftests.yml"),
-        "branch": attr.string(default = "master"),
+        "workflow": attr.string(default = "generate_vectors.yml"),
+        "branch": attr.string(default = "dev"),
         "release_url_template": attr.string(default = "https://github.com/ethereum/consensus-specs/releases/download/%s"),
     },
 )
```
```diff
@@ -285,10 +285,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
 	ctx, span := trace.StartSpan(ctx, "validator.gRPCClient.StartEventStream")
 	defer span.End()
 	if len(topics) == 0 {
-		eventsChannel <- &eventClient.Event{
+		eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 			EventType: eventClient.EventError,
 			Data:      []byte(errors.New("no topics were added").Error()),
-		}
+		})
 		return
 	}
 	// TODO(13563): ONLY WORKS WITH HEAD TOPIC.
@@ -299,10 +299,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
 		}
 	}
 	if !containsHead {
-		eventsChannel <- &eventClient.Event{
+		eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 			EventType: eventClient.EventConnectionError,
 			Data:      []byte(errors.Wrap(client.ErrConnectionIssue, "gRPC only supports the head topic, and head topic was not passed").Error()),
-		}
+		})
 	}
 	if containsHead && len(topics) > 1 {
 		log.Warn("gRPC only supports the head topic, other topics will be ignored")
@@ -310,10 +310,10 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
 
 	stream, err := c.beaconNodeValidatorClient.StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
 	if err != nil {
-		eventsChannel <- &eventClient.Event{
+		eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 			EventType: eventClient.EventConnectionError,
 			Data:      []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
-		}
+		})
 		return
 	}
 	c.isEventStreamRunning = true
@@ -327,25 +327,25 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
 		if ctx.Err() != nil {
 			c.isEventStreamRunning = false
 			if errors.Is(ctx.Err(), context.Canceled) {
-				eventsChannel <- &eventClient.Event{
+				eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 					EventType: eventClient.EventConnectionError,
 					Data:      []byte(errors.Wrap(client.ErrConnectionIssue, ctx.Err().Error()).Error()),
-				}
+				})
 				return
 			}
-			eventsChannel <- &eventClient.Event{
+			eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 				EventType: eventClient.EventError,
 				Data:      []byte(ctx.Err().Error()),
-			}
+			})
 			return
 		}
 		res, err := stream.Recv()
 		if err != nil {
 			c.isEventStreamRunning = false
-			eventsChannel <- &eventClient.Event{
+			eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 				EventType: eventClient.EventConnectionError,
 				Data:      []byte(errors.Wrap(client.ErrConnectionIssue, err.Error()).Error()),
-			}
+			})
 			return
 		}
 		if res == nil {
@@ -357,15 +357,15 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
 			CurrentDutyDependentRoot: hexutil.Encode(res.CurrentDutyDependentRoot),
 		})
 		if err != nil {
-			eventsChannel <- &eventClient.Event{
+			eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 				EventType: eventClient.EventError,
 				Data:      []byte(errors.Wrap(err, "failed to marshal Head Event").Error()),
-			}
+			})
 		}
-		eventsChannel <- &eventClient.Event{
+		eventClient.PublishEvent(eventsChannel, &eventClient.Event{
 			EventType: eventClient.EventHead,
 			Data:      b,
-		}
+		})
 	}
 }
```
```diff
@@ -223,7 +223,7 @@ func TestStartEventStream(t *testing.T) {
 
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			eventsChannel := make(chan *eventClient.Event, 1) // Buffer to prevent blocking
+			eventsChannel := make(chan *eventClient.Event, 4) // Buffer to prevent blocking
 			tc.prepare() // Setup mock expectations
 
 			go grpcClient.StartEventStream(ctx, tc.topics, eventsChannel)
```
```diff
@@ -441,7 +441,6 @@ func TestRunnerPushesProposerSettings_ValidContext(t *testing.T) {
 		defer assertValidContext(t, timedCtx, ctx)
 		delay(t)
 	})
-	vcm.EXPECT().EventStreamIsRunning().Return(true).AnyTimes().Do(func() { delay(t) })
 	vcm.EXPECT().SubmitValidatorRegistrations(liveCtx, gomock.Any()).Do(func(ctx context.Context, _ any) {
 		defer assertValidContext(t, timedCtx, ctx) // This is the specific regression test assertion for PR 15369.
 		delay(t)
```
```diff
@@ -66,6 +66,8 @@ type ValidatorService struct {
 	closeClientFunc func() // validator client stop function is used here
 }
 
+const eventChannelBufferSize = 32
+
 // Config for the validator service.
 type Config struct {
 	Validator iface.Validator
@@ -234,7 +236,7 @@ func (v *ValidatorService) Start() {
 		distributed:            v.distributed,
 		disableDutiesPolling:   v.disableDutiesPolling,
 		accountsChangedChannel: make(chan [][fieldparams.BLSPubkeyLength]byte, 1),
-		eventsChannel:          make(chan *eventClient.Event, 1),
+		eventsChannel:          make(chan *eventClient.Event, eventChannelBufferSize),
 	}
 
 	hm := newHealthMonitor(v.ctx, v.cancel, v.maxHealthChecks, v.validator)
```
```diff
@@ -64,6 +64,11 @@ var (
 	msgNoKeysFetched = "No validating keys fetched. Waiting for keys..."
 )
 
+const (
+	eventStreamStopped uint32 = iota
+	eventStreamRunning
+)
+
 type validator struct {
 	logValidatorPerformance bool
 	distributed             bool
@@ -82,6 +87,7 @@ type validator struct {
 	cachedAttestationData  *ethpb.AttestationData
 	accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
 	eventsChannel          chan *eventClient.Event
+	eventStreamState       atomic.Uint32
 	highestValidSlot       primitives.Slot
 	submittedAggregates    map[submittedAttKey]*submittedAtt
 	graffitiStruct         *graffiti.Graffiti
```
```diff
@@ -1211,12 +1217,40 @@ func (v *validator) PushProposerSettings(ctx context.Context, slot primitives.Sl
 }
 
 func (v *validator) StartEventStream(ctx context.Context, topics []string) {
-	if v.EventStreamIsRunning() {
+	if !v.eventStreamState.CompareAndSwap(eventStreamStopped, eventStreamRunning) {
 		log.Debug("EventStream is already running")
 		return
 	}
 	log.WithField("topics", topics).Info("Starting event stream")
-	v.validatorClient.StartEventStream(ctx, topics, v.eventsChannel)
+	go v.runEventStream(ctx, topics)
 }
 
+func (v *validator) runEventStream(ctx context.Context, topics []string) {
+	defer v.eventStreamState.Store(eventStreamStopped)
+	backoff := time.Second
+	const maxBackoff = 30 * time.Second
+
+	for {
+		v.validatorClient.StartEventStream(ctx, topics, v.eventsChannel)
+		if ctx.Err() != nil {
+			return
+		}
+
+		log.WithField("retryIn", backoff).Warn("Event stream ended unexpectedly, attempting to resubscribe")
+		timer := time.NewTimer(backoff)
+		select {
+		case <-ctx.Done():
+			timer.Stop()
+			return
+		case <-timer.C:
+		}
+		if backoff < maxBackoff {
+			backoff *= 2
+			if backoff > maxBackoff {
+				backoff = maxBackoff
+			}
+		}
+	}
+}
+
 func (v *validator) checkDependentRoots(ctx context.Context, head *structs.HeadEvent) error {
```
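The compare-and-swap guard above makes `StartEventStream` idempotent: only the first caller flips the state from stopped to running and spawns the retry loop. A standalone sketch of the same pattern (names here are illustrative, not the validator's types):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const (
	stopped uint32 = iota
	running
)

type streamer struct{ state atomic.Uint32 }

// start returns true only for the caller that wins the stopped->running swap.
func (s *streamer) start() bool {
	return s.state.CompareAndSwap(stopped, running)
}

func main() {
	var s streamer
	fmt.Println(s.start()) // true: first caller starts the stream
	fmt.Println(s.start()) // false: already running
}
```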
```diff
@@ -1303,7 +1337,7 @@ func (v *validator) ProcessEvent(ctx context.Context, event *eventClient.Event)
 }
 
 func (v *validator) EventStreamIsRunning() bool {
-	return v.validatorClient.EventStreamIsRunning()
+	return v.eventStreamState.Load() == eventStreamRunning
 }
 
 func (v *validator) Host() string {
```